From bdaa1a5d6c45ac8e654136924ff56ff84b086941 Mon Sep 17 00:00:00 2001 From: al8n Date: Tue, 24 Dec 2024 16:30:34 +0800 Subject: [PATCH] Finish stateful key comparison --- .github/workflows/ci.yml | 56 +- .github/workflows/release.yml | 42 - CHANGELOG.md | 110 +- Cargo.toml | 73 +- README.md | 145 +- .../benches => benches}/btree.rs | 0 .../benches => benches}/hash.rs | 0 .../benches => benches}/skiplist.rs | 2 +- .../benches => benches}/skipmap.rs | 2 +- build-common.rs | 13 - ci/check-features.sh | 17 +- ci/crossbeam-epoch-loom.sh | 11 - ci/miri.sh | 24 +- ci/no_atomic.sh | 31 - ci/san.sh | 8 - crossbeam-channel/CHANGELOG.md | 252 -- crossbeam-channel/Cargo.toml | 35 - crossbeam-channel/LICENSE-APACHE | 201 -- crossbeam-channel/LICENSE-MIT | 27 - crossbeam-channel/LICENSE-THIRD-PARTY | 593 ----- crossbeam-channel/README.md | 84 - crossbeam-channel/benches/crossbeam.rs | 712 ------ crossbeam-channel/benchmarks/Cargo.toml | 74 - crossbeam-channel/benchmarks/README.md | 43 - crossbeam-channel/benchmarks/atomicring.rs | 140 -- .../benchmarks/atomicringqueue.rs | 123 - crossbeam-channel/benchmarks/bus.rs | 57 - .../benchmarks/crossbeam-channel.rs | 187 -- .../benchmarks/crossbeam-deque.rs | 67 - crossbeam-channel/benchmarks/flume.rs | 167 -- .../benchmarks/futures-channel.rs | 180 -- crossbeam-channel/benchmarks/go.go | 201 -- crossbeam-channel/benchmarks/lockfree.rs | 108 - crossbeam-channel/benchmarks/message.rs | 17 - crossbeam-channel/benchmarks/mpmc.rs | 140 -- crossbeam-channel/benchmarks/mpsc.rs | 169 -- crossbeam-channel/benchmarks/plot.png | Bin 119450 -> 0 bytes crossbeam-channel/benchmarks/plot.py | 148 -- crossbeam-channel/benchmarks/run.sh | 24 - crossbeam-channel/benchmarks/segqueue.rs | 117 - crossbeam-channel/examples/fibonacci.rs | 25 - crossbeam-channel/examples/matching.rs | 72 - crossbeam-channel/examples/stopwatch.rs | 56 - crossbeam-channel/src/channel.rs | 1539 ------------ crossbeam-channel/src/context.rs | 178 -- 
crossbeam-channel/src/counter.rs | 145 -- crossbeam-channel/src/err.rs | 354 --- crossbeam-channel/src/flavors/array.rs | 639 ----- crossbeam-channel/src/flavors/at.rs | 193 -- crossbeam-channel/src/flavors/list.rs | 775 ------ crossbeam-channel/src/flavors/mod.rs | 17 - crossbeam-channel/src/flavors/never.rs | 110 - crossbeam-channel/src/flavors/tick.rs | 163 -- crossbeam-channel/src/flavors/zero.rs | 496 ---- crossbeam-channel/src/lib.rs | 377 --- crossbeam-channel/src/select.rs | 1323 ---------- crossbeam-channel/src/select_macro.rs | 1154 --------- crossbeam-channel/src/utils.rs | 58 - crossbeam-channel/src/waker.rs | 287 --- crossbeam-channel/tests/after.rs | 336 --- crossbeam-channel/tests/array.rs | 744 ------ crossbeam-channel/tests/golang.rs | 2152 ---------------- crossbeam-channel/tests/iter.rs | 110 - crossbeam-channel/tests/list.rs | 597 ----- crossbeam-channel/tests/mpsc.rs | 2126 ---------------- crossbeam-channel/tests/never.rs | 95 - crossbeam-channel/tests/ready.rs | 851 ------- crossbeam-channel/tests/same_channel.rs | 114 - crossbeam-channel/tests/select.rs | 1326 ---------- crossbeam-channel/tests/select_macro.rs | 1597 ------------ crossbeam-channel/tests/thread_locals.rs | 53 - crossbeam-channel/tests/tick.rs | 352 --- crossbeam-channel/tests/zero.rs | 587 ----- crossbeam-deque/CHANGELOG.md | 141 -- crossbeam-deque/Cargo.toml | 34 - crossbeam-deque/LICENSE-APACHE | 201 -- crossbeam-deque/LICENSE-MIT | 27 - crossbeam-deque/README.md | 46 - crossbeam-deque/src/deque.rs | 2209 ----------------- crossbeam-deque/src/lib.rs | 101 - crossbeam-deque/tests/fifo.rs | 357 --- crossbeam-deque/tests/injector.rs | 391 --- crossbeam-deque/tests/lifo.rs | 359 --- crossbeam-deque/tests/steal.rs | 212 -- crossbeam-epoch/CHANGELOG.md | 204 -- crossbeam-epoch/Cargo.toml | 50 - crossbeam-epoch/LICENSE-APACHE | 201 -- crossbeam-epoch/LICENSE-MIT | 27 - crossbeam-epoch/README.md | 53 - crossbeam-epoch/benches/defer.rs | 69 - crossbeam-epoch/benches/flush.rs | 52 - 
crossbeam-epoch/benches/pin.rs | 31 - crossbeam-epoch/examples/sanitize.rs | 66 - crossbeam-epoch/src/atomic.rs | 1609 ------------ crossbeam-epoch/src/collector.rs | 466 ---- crossbeam-epoch/src/default.rs | 93 - crossbeam-epoch/src/deferred.rs | 149 -- crossbeam-epoch/src/epoch.rs | 132 - crossbeam-epoch/src/guard.rs | 525 ---- crossbeam-epoch/src/internal.rs | 602 ----- crossbeam-epoch/src/lib.rs | 161 -- crossbeam-epoch/src/sync/list.rs | 488 ---- crossbeam-epoch/src/sync/mod.rs | 7 - crossbeam-epoch/src/sync/once_lock.rs | 1 - crossbeam-epoch/src/sync/queue.rs | 469 ---- crossbeam-epoch/tests/loom.rs | 157 -- crossbeam-queue/CHANGELOG.md | 88 - crossbeam-queue/Cargo.toml | 37 - crossbeam-queue/LICENSE-APACHE | 201 -- crossbeam-queue/LICENSE-MIT | 27 - crossbeam-queue/README.md | 54 - crossbeam-queue/src/array_queue.rs | 538 ---- crossbeam-queue/src/lib.rs | 29 - crossbeam-queue/src/seg_queue.rs | 566 ----- crossbeam-queue/tests/array_queue.rs | 374 --- crossbeam-queue/tests/seg_queue.rs | 210 -- crossbeam-skiplist/CHANGELOG.md | 21 - crossbeam-skiplist/Cargo.toml | 39 - crossbeam-skiplist/LICENSE-APACHE | 201 -- crossbeam-skiplist/LICENSE-MIT | 27 - crossbeam-skiplist/README.md | 52 - crossbeam-skiplist/src/equivalent.rs | 6 - crossbeam-skiplist/src/lib.rs | 262 -- crossbeam-utils/CHANGELOG.md | 243 -- crossbeam-utils/Cargo.toml | 45 - crossbeam-utils/LICENSE-APACHE | 201 -- crossbeam-utils/LICENSE-MIT | 27 - crossbeam-utils/README.md | 73 - crossbeam-utils/benches/atomic_cell.rs | 156 -- crossbeam-utils/build-common.rs | 1 - crossbeam-utils/build.rs | 48 - crossbeam-utils/no_atomic.rs | 1 - crossbeam-utils/src/atomic/atomic_cell.rs | 1243 ---------- crossbeam-utils/src/atomic/consume.rs | 111 - crossbeam-utils/src/atomic/mod.rs | 32 - crossbeam-utils/src/atomic/seq_lock.rs | 112 - crossbeam-utils/src/atomic/seq_lock_wide.rs | 155 -- crossbeam-utils/src/backoff.rs | 289 --- crossbeam-utils/src/cache_padded.rs | 217 -- crossbeam-utils/src/lib.rs | 92 - 
crossbeam-utils/src/sync/mod.rs | 17 - crossbeam-utils/src/sync/once_lock.rs | 90 - crossbeam-utils/src/sync/parker.rs | 433 ---- crossbeam-utils/src/sync/sharded_lock.rs | 638 ----- crossbeam-utils/src/sync/wait_group.rs | 146 -- crossbeam-utils/src/thread.rs | 633 ----- crossbeam-utils/tests/atomic_cell.rs | 395 --- crossbeam-utils/tests/cache_padded.rs | 113 - crossbeam-utils/tests/parker.rs | 49 - crossbeam-utils/tests/sharded_lock.rs | 252 -- crossbeam-utils/tests/thread.rs | 215 -- crossbeam-utils/tests/wait_group.rs | 67 - .../examples => examples}/simple.rs | 2 +- no_atomic.rs | 9 - {crossbeam-skiplist/src => src}/base.rs | 551 ++-- src/equivalentor.rs | 12 + src/lib.rs | 282 ++- {crossbeam-skiplist/src => src}/map.rs | 231 +- {crossbeam-skiplist/src => src}/set.rs | 209 +- {crossbeam-skiplist/tests => tests}/base.rs | 12 +- {crossbeam-skiplist/tests => tests}/map.rs | 2 +- {crossbeam-skiplist/tests => tests}/set.rs | 2 +- tests/subcrates.rs | 47 - tools/publish.sh | 49 - 164 files changed, 881 insertions(+), 42444 deletions(-) delete mode 100644 .github/workflows/release.yml rename {crossbeam-skiplist/benches => benches}/btree.rs (100%) rename {crossbeam-skiplist/benches => benches}/hash.rs (100%) rename {crossbeam-skiplist/benches => benches}/skiplist.rs (98%) rename {crossbeam-skiplist/benches => benches}/skipmap.rs (97%) delete mode 100644 build-common.rs delete mode 100755 ci/crossbeam-epoch-loom.sh delete mode 100755 ci/no_atomic.sh delete mode 100644 crossbeam-channel/CHANGELOG.md delete mode 100644 crossbeam-channel/Cargo.toml delete mode 100644 crossbeam-channel/LICENSE-APACHE delete mode 100644 crossbeam-channel/LICENSE-MIT delete mode 100644 crossbeam-channel/LICENSE-THIRD-PARTY delete mode 100644 crossbeam-channel/README.md delete mode 100644 crossbeam-channel/benches/crossbeam.rs delete mode 100644 crossbeam-channel/benchmarks/Cargo.toml delete mode 100644 crossbeam-channel/benchmarks/README.md delete mode 100644 
crossbeam-channel/benchmarks/atomicring.rs delete mode 100644 crossbeam-channel/benchmarks/atomicringqueue.rs delete mode 100644 crossbeam-channel/benchmarks/bus.rs delete mode 100644 crossbeam-channel/benchmarks/crossbeam-channel.rs delete mode 100644 crossbeam-channel/benchmarks/crossbeam-deque.rs delete mode 100644 crossbeam-channel/benchmarks/flume.rs delete mode 100644 crossbeam-channel/benchmarks/futures-channel.rs delete mode 100644 crossbeam-channel/benchmarks/go.go delete mode 100644 crossbeam-channel/benchmarks/lockfree.rs delete mode 100644 crossbeam-channel/benchmarks/message.rs delete mode 100644 crossbeam-channel/benchmarks/mpmc.rs delete mode 100644 crossbeam-channel/benchmarks/mpsc.rs delete mode 100644 crossbeam-channel/benchmarks/plot.png delete mode 100755 crossbeam-channel/benchmarks/plot.py delete mode 100755 crossbeam-channel/benchmarks/run.sh delete mode 100644 crossbeam-channel/benchmarks/segqueue.rs delete mode 100644 crossbeam-channel/examples/fibonacci.rs delete mode 100644 crossbeam-channel/examples/matching.rs delete mode 100644 crossbeam-channel/examples/stopwatch.rs delete mode 100644 crossbeam-channel/src/channel.rs delete mode 100644 crossbeam-channel/src/context.rs delete mode 100644 crossbeam-channel/src/counter.rs delete mode 100644 crossbeam-channel/src/err.rs delete mode 100644 crossbeam-channel/src/flavors/array.rs delete mode 100644 crossbeam-channel/src/flavors/at.rs delete mode 100644 crossbeam-channel/src/flavors/list.rs delete mode 100644 crossbeam-channel/src/flavors/mod.rs delete mode 100644 crossbeam-channel/src/flavors/never.rs delete mode 100644 crossbeam-channel/src/flavors/tick.rs delete mode 100644 crossbeam-channel/src/flavors/zero.rs delete mode 100644 crossbeam-channel/src/lib.rs delete mode 100644 crossbeam-channel/src/select.rs delete mode 100644 crossbeam-channel/src/select_macro.rs delete mode 100644 crossbeam-channel/src/utils.rs delete mode 100644 crossbeam-channel/src/waker.rs delete mode 100644 
crossbeam-channel/tests/after.rs delete mode 100644 crossbeam-channel/tests/array.rs delete mode 100644 crossbeam-channel/tests/golang.rs delete mode 100644 crossbeam-channel/tests/iter.rs delete mode 100644 crossbeam-channel/tests/list.rs delete mode 100644 crossbeam-channel/tests/mpsc.rs delete mode 100644 crossbeam-channel/tests/never.rs delete mode 100644 crossbeam-channel/tests/ready.rs delete mode 100644 crossbeam-channel/tests/same_channel.rs delete mode 100644 crossbeam-channel/tests/select.rs delete mode 100644 crossbeam-channel/tests/select_macro.rs delete mode 100644 crossbeam-channel/tests/thread_locals.rs delete mode 100644 crossbeam-channel/tests/tick.rs delete mode 100644 crossbeam-channel/tests/zero.rs delete mode 100644 crossbeam-deque/CHANGELOG.md delete mode 100644 crossbeam-deque/Cargo.toml delete mode 100644 crossbeam-deque/LICENSE-APACHE delete mode 100644 crossbeam-deque/LICENSE-MIT delete mode 100644 crossbeam-deque/README.md delete mode 100644 crossbeam-deque/src/deque.rs delete mode 100644 crossbeam-deque/src/lib.rs delete mode 100644 crossbeam-deque/tests/fifo.rs delete mode 100644 crossbeam-deque/tests/injector.rs delete mode 100644 crossbeam-deque/tests/lifo.rs delete mode 100644 crossbeam-deque/tests/steal.rs delete mode 100644 crossbeam-epoch/CHANGELOG.md delete mode 100644 crossbeam-epoch/Cargo.toml delete mode 100644 crossbeam-epoch/LICENSE-APACHE delete mode 100644 crossbeam-epoch/LICENSE-MIT delete mode 100644 crossbeam-epoch/README.md delete mode 100644 crossbeam-epoch/benches/defer.rs delete mode 100644 crossbeam-epoch/benches/flush.rs delete mode 100644 crossbeam-epoch/benches/pin.rs delete mode 100644 crossbeam-epoch/examples/sanitize.rs delete mode 100644 crossbeam-epoch/src/atomic.rs delete mode 100644 crossbeam-epoch/src/collector.rs delete mode 100644 crossbeam-epoch/src/default.rs delete mode 100644 crossbeam-epoch/src/deferred.rs delete mode 100644 crossbeam-epoch/src/epoch.rs delete mode 100644 
crossbeam-epoch/src/guard.rs delete mode 100644 crossbeam-epoch/src/internal.rs delete mode 100644 crossbeam-epoch/src/lib.rs delete mode 100644 crossbeam-epoch/src/sync/list.rs delete mode 100644 crossbeam-epoch/src/sync/mod.rs delete mode 120000 crossbeam-epoch/src/sync/once_lock.rs delete mode 100644 crossbeam-epoch/src/sync/queue.rs delete mode 100644 crossbeam-epoch/tests/loom.rs delete mode 100644 crossbeam-queue/CHANGELOG.md delete mode 100644 crossbeam-queue/Cargo.toml delete mode 100644 crossbeam-queue/LICENSE-APACHE delete mode 100644 crossbeam-queue/LICENSE-MIT delete mode 100644 crossbeam-queue/README.md delete mode 100644 crossbeam-queue/src/array_queue.rs delete mode 100644 crossbeam-queue/src/lib.rs delete mode 100644 crossbeam-queue/src/seg_queue.rs delete mode 100644 crossbeam-queue/tests/array_queue.rs delete mode 100644 crossbeam-queue/tests/seg_queue.rs delete mode 100644 crossbeam-skiplist/CHANGELOG.md delete mode 100644 crossbeam-skiplist/Cargo.toml delete mode 100644 crossbeam-skiplist/LICENSE-APACHE delete mode 100644 crossbeam-skiplist/LICENSE-MIT delete mode 100644 crossbeam-skiplist/README.md delete mode 100644 crossbeam-skiplist/src/equivalent.rs delete mode 100644 crossbeam-skiplist/src/lib.rs delete mode 100644 crossbeam-utils/CHANGELOG.md delete mode 100644 crossbeam-utils/Cargo.toml delete mode 100644 crossbeam-utils/LICENSE-APACHE delete mode 100644 crossbeam-utils/LICENSE-MIT delete mode 100644 crossbeam-utils/README.md delete mode 100644 crossbeam-utils/benches/atomic_cell.rs delete mode 120000 crossbeam-utils/build-common.rs delete mode 100644 crossbeam-utils/build.rs delete mode 120000 crossbeam-utils/no_atomic.rs delete mode 100644 crossbeam-utils/src/atomic/atomic_cell.rs delete mode 100644 crossbeam-utils/src/atomic/consume.rs delete mode 100644 crossbeam-utils/src/atomic/mod.rs delete mode 100644 crossbeam-utils/src/atomic/seq_lock.rs delete mode 100644 crossbeam-utils/src/atomic/seq_lock_wide.rs delete mode 100644 
crossbeam-utils/src/backoff.rs delete mode 100644 crossbeam-utils/src/cache_padded.rs delete mode 100644 crossbeam-utils/src/lib.rs delete mode 100644 crossbeam-utils/src/sync/mod.rs delete mode 100644 crossbeam-utils/src/sync/once_lock.rs delete mode 100644 crossbeam-utils/src/sync/parker.rs delete mode 100644 crossbeam-utils/src/sync/sharded_lock.rs delete mode 100644 crossbeam-utils/src/sync/wait_group.rs delete mode 100644 crossbeam-utils/src/thread.rs delete mode 100644 crossbeam-utils/tests/atomic_cell.rs delete mode 100644 crossbeam-utils/tests/cache_padded.rs delete mode 100644 crossbeam-utils/tests/parker.rs delete mode 100644 crossbeam-utils/tests/sharded_lock.rs delete mode 100644 crossbeam-utils/tests/thread.rs delete mode 100644 crossbeam-utils/tests/wait_group.rs rename {crossbeam-skiplist/examples => examples}/simple.rs (92%) delete mode 100644 no_atomic.rs rename {crossbeam-skiplist/src => src}/base.rs (86%) create mode 100644 src/equivalentor.rs rename {crossbeam-skiplist/src => src}/map.rs (78%) rename {crossbeam-skiplist/src => src}/set.rs (73%) rename {crossbeam-skiplist/tests => tests}/base.rs (98%) rename {crossbeam-skiplist/tests => tests}/map.rs (99%) rename {crossbeam-skiplist/tests => tests}/set.rs (99%) delete mode 100644 tests/subcrates.rs delete mode 100755 tools/publish.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d63274e72..564904e93 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,7 +7,7 @@ on: pull_request: push: branches: - - master + - crossbeam-skiplist-fd schedule: - cron: '0 1 * * *' @@ -38,11 +38,10 @@ jobs: strategy: fail-fast: false matrix: - # aarch64/x86_64 macOS and aarch64 Linux are tested on Cirrus CI include: - - rust: '1.63' + - rust: '1.81' os: ubuntu-latest - - rust: '1.63' + - rust: '1.81' os: windows-latest - rust: stable os: ubuntu-latest @@ -117,43 +116,6 @@ jobs: - name: dependency tree check run: ci/dependencies.sh - # When this job failed, run 
ci/no_atomic.sh and commit result changes. - codegen: - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - timeout-minutes: 60 - steps: - - uses: taiki-e/checkout-action@v1 - - name: Install Rust - run: rustup update stable - - run: ci/no_atomic.sh - - run: git add -N . && git diff --exit-code - if: github.repository_owner != 'crossbeam-rs' || github.event_name != 'schedule' - - id: diff - run: | - git config user.name "Taiki Endo" - git config user.email "te316e89@gmail.com" - git add -N . - if ! git diff --exit-code; then - git add . - git commit -m "Update no_atomic.rs" - echo 'success=false' >>"${GITHUB_OUTPUT}" - fi - if: github.repository_owner == 'crossbeam-rs' && github.event_name == 'schedule' - - uses: peter-evans/create-pull-request@v7 - with: - title: Update no_atomic.rs - body: | - Auto-generated by [create-pull-request][1] - [Please close and immediately reopen this pull request to run CI.][2] - - [1]: https://github.com/peter-evans/create-pull-request - [2]: https://github.com/peter-evans/create-pull-request/blob/HEAD/docs/concepts-guidelines.md#workarounds-to-trigger-further-workflow-runs - branch: update-no-atomic-rs - if: github.repository_owner == 'crossbeam-rs' && github.event_name == 'schedule' && steps.diff.outputs.success == 'false' - # Check formatting. rustfmt: runs-on: ubuntu-latest @@ -182,7 +144,6 @@ jobs: fail-fast: false matrix: group: - - channel - others runs-on: ubuntu-latest timeout-minutes: 120 # TODO @@ -218,17 +179,6 @@ jobs: - name: Run sanitizers run: ci/san.sh - # Run loom tests. - loom: - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - uses: taiki-e/checkout-action@v1 - - name: Install Rust - run: rustup update stable - - name: loom - run: ci/crossbeam-epoch-loom.sh - # Check if the document can be generated without warning. 
docs: runs-on: ubuntu-latest diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 501d20a72..000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,42 +0,0 @@ -name: Release - -permissions: - contents: write - -on: - push: - tags: - - crossbeam-[0-9]+.* - - crossbeam-[a-z]+-[0-9]+.* - -defaults: - run: - shell: bash - -jobs: - create-release: - if: github.repository_owner == 'crossbeam-rs' - runs-on: ubuntu-latest - timeout-minutes: 60 - steps: - - uses: taiki-e/checkout-action@v1 - - uses: taiki-e/create-gh-release-action@v1 - with: - prefix: crossbeam(-[a-z]+)? - changelog: $prefix/CHANGELOG.md - title: $prefix $version - branch: master - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: "!startsWith(github.ref_name, 'crossbeam-0')" - # Since the crossbeam crate is located at the root, we need to handle it differently. - # TODO: Consider moving crossbeam crate to a subdirectory. - - uses: taiki-e/create-gh-release-action@v1 - with: - prefix: crossbeam(-[a-z]+)? - changelog: CHANGELOG.md - title: crossbeam $version - branch: master - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - if: startsWith(github.ref_name, 'crossbeam-0') diff --git a/CHANGELOG.md b/CHANGELOG.md index e881a9db3..4296bf762 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,105 +1,25 @@ -# Version 0.8.4 +# Version 0.1.4 -- Remove dependency on `cfg-if`. (#1072) - -# Version 0.8.3 - -- Bump the minimum supported Rust version to 1.61. (#1037) - -# Version 0.8.2 - -- Bump the minimum supported Rust version to 1.38. (#877) - -# Version 0.8.1 - -- Support targets that do not have atomic CAS on stable Rust (#698) - -# Version 0.8.0 - -- Bump the minimum supported Rust version to 1.36. -- Bump `crossbeam-channel` to `0.5`. -- Bump `crossbeam-deque` to `0.8`. -- Bump `crossbeam-epoch` to `0.9`. -- Bump `crossbeam-queue` to `0.3`. -- Bump `crossbeam-utils` to `0.8`. 
- -# Version 0.7.3 - -- Fix breakage with nightly feature due to rust-lang/rust#65214. -- Bump `crossbeam-channel` to `0.4`. -- Bump `crossbeam-epoch` to `0.8`. -- Bump `crossbeam-queue` to `0.2`. -- Bump `crossbeam-utils` to `0.7`. - -# Version 0.7.2 +- Use `dbutils::equivalentor::Comparator` and `dbutils::equivalentor::QueryComparator` for more flexible customized key comparisons. -- Bump `crossbeam-channel` to `0.3.9`. -- Bump `crossbeam-epoch` to `0.7.2`. -- Bump `crossbeam-utils` to `0.6.6`. +# Version 0.1.3 -# Version 0.7.1 - -- Bump `crossbeam-utils` to `0.6.5`. - -# Version 0.7.0 - -- Remove `ArcCell`, `MsQueue`, and `TreiberStack`. -- Change the interface of `ShardedLock` to match `RwLock`. -- Add `SegQueue::len()`. -- Rename `SegQueue::try_pop()` to `SegQueue::pop()`. -- Change the return type of `SegQueue::pop()` to `Result`. -- Introduce `ArrayQueue`. -- Update dependencies. - -# Version 0.6.0 - -- Update dependencies. - -# Version 0.5.0 - -- Update `crossbeam-channel` to 0.3. -- Update `crossbeam-utils` to 0.6. -- Add `AtomicCell`, `SharedLock`, and `WaitGroup`. - -# Version 0.4.1 - -- Fix a double-free bug in `MsQueue` and `SegQueue`. - -# Version 0.4 +- Remove dependency on `cfg-if`. (#1072) 
-- Implement `Default` and `From` traits. +# Version 0.1.2 -# Version 0.3 +- Bump the minimum supported Rust version to 1.61. (#1037) +- Add `compare_insert`. (#976) +- Improve support for targets without atomic CAS. (#1037) +- Remove build script. (#1037) +- Remove dependency on `scopeguard`. (#1045) -- Introduced `ScopedThreadBuilder` with the ability to name threads and set stack size -- `Worker` methods in the Chase-Lev deque don't require mutable access anymore -- Fixed a bug when unblocking `pop()` in `MsQueue` -- Implemented `Drop` for `MsQueue`, `SegQueue`, and `TreiberStack` -- Implemented `Default` for `TreiberStack` -- Added `is_empty` to `SegQueue` -- Renamed `mem::epoch` to `epoch` -- Other bug fixes +# Version 0.1.1 -# Version 0.2 +- Fix `get_unchecked` panic by raw pointer calculation. (#940) -- Changed existing non-blocking `pop` methods to `try_pop` -- Added blocking `pop` support to Michael-Scott queue -- Added Chase-Lev work-stealing deque +# Version 0.1.0 -# Version 0.1 +**Note:** This release has been yanked due to bug fixed in 0.1.1. -- Added [epoch-based memory management](http://aturon.github.io/blog/2015/08/27/epoch/) -- Added Michael-Scott queue -- Added Segmented array queue +- Initial implementation. 
diff --git a/Cargo.toml b/Cargo.toml index e19e10d6b..8b24fdb74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,76 +1,39 @@ [package] -name = "crossbeam" +name = "crossbeam-skiplist-fd" # When publishing a new version: # - Update CHANGELOG.md # - Update README.md (when increasing major or minor version) -# - Run './tools/publish.sh crossbeam ' -version = "0.8.4" +version = "0.1.4" edition = "2021" -rust-version = "1.61" +rust-version = "1.81" license = "MIT OR Apache-2.0" -repository = "https://github.com/crossbeam-rs/crossbeam" -homepage = "https://github.com/crossbeam-rs/crossbeam" -description = "Tools for concurrent programming" -keywords = ["atomic", "garbage", "non-blocking", "lock-free", "rcu"] -categories = ["concurrency", "memory-management", "data-structures", "no-std"] -exclude = ["/.*", "/ci", "/tools"] +repository = "https://github.com/al8n/crossbeam" +homepage = "https://github.com/al8n/crossbeam/tree/crossbeam-skiplist-fd" +description = "A long-term maintained forked version of the crossbeam-skiplist for supporting more flexible key comparison customization." +keywords = ["map", "set", "skiplist", "lock-free"] +categories = ["algorithms", "concurrency", "data-structures", "no-std"] [features] default = ["std"] # Enable to use APIs that require `std`. # This is enabled by default. -std = [ - "alloc", - "crossbeam-channel/std", - "crossbeam-deque/std", - "crossbeam-epoch/std", - "crossbeam-queue/std", - "crossbeam-utils/std", -] +std = ["alloc", "crossbeam-epoch/std", "crossbeam-utils/std", "dbutils/std"] # Enable to use APIs that require `alloc`. # This is enabled by default and also enabled if the `std` feature is enabled. -alloc = ["crossbeam-epoch/alloc", "crossbeam-queue/alloc"] +# +# NOTE: Disabling both `std` *and* `alloc` features is not supported yet. 
+alloc = ["crossbeam-epoch/alloc", "dbutils/alloc"] [dependencies] -crossbeam-channel = { version = "0.5.10", path = "crossbeam-channel", default-features = false, optional = true } -crossbeam-deque = { version = "0.8.4", path = "crossbeam-deque", default-features = false, optional = true } -crossbeam-epoch = { version = "0.9.17", path = "crossbeam-epoch", default-features = false, optional = true } -crossbeam-queue = { version = "0.3.10", path = "crossbeam-queue", default-features = false, optional = true } -crossbeam-utils = { version = "0.8.18", path = "crossbeam-utils", default-features = false, features = ["atomic"] } +crossbeam-epoch = { version = "0.9", default-features = false } +crossbeam-utils = { version = "0.8", default-features = false } +dbutils = { version = "0.12", default-features = false } +equivalent-flipped = "1" [dev-dependencies] rand = "0.8" -[lints] -workspace = true - -[workspace] -resolver = "2" -members = [ - ".", - "crossbeam-channel", - "crossbeam-channel/benchmarks", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-skiplist", - "crossbeam-utils", -] - -[workspace.lints.rust] -missing_debug_implementations = "warn" -rust_2018_idioms = "warn" -single_use_lifetimes = "warn" -unexpected_cfgs = { level = "warn", check-cfg = [ - 'cfg(crossbeam_loom)', - 'cfg(crossbeam_sanitize)', -] } -unreachable_pub = "warn" -# unsafe_op_in_unsafe_fn = "warn" # Set at crate-level instead since https://github.com/rust-lang/rust/pull/100081 is not available on MSRV -[workspace.lints.clippy] -# Suppress buggy or noisy clippy lints -declare_interior_mutable_const = { level = "allow", priority = 1 } # https://github.com/rust-lang/rust-clippy/issues/7665 -incompatible_msrv = { level = "allow", priority = 1 } # buggy: doesn't consider cfg, https://github.com/rust-lang/rust-clippy/issues/12280, https://github.com/rust-lang/rust-clippy/issues/12257#issuecomment-2093667187 -lint_groups_priority = { level = "allow", priority = 1 } # 
https://github.com/rust-lang/rust-clippy/issues/12920 +# [lints] +# workspace = true diff --git a/README.md b/README.md index 264f373ea..f4d2d88d3 100644 --- a/README.md +++ b/README.md @@ -1,85 +1,26 @@ -# Crossbeam +# Crossbeam Skiplist For Databases -[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( -https://github.com/crossbeam-rs/crossbeam/actions) +A long-term maintained forked version of the [`crossbeam-skiplist`](https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-skiplist) for supporting more flexible key comparison customization. + +[![Build Status](https://github.com/al8n/crossbeam/workflows/CI/badge.svg)]( +https://github.com/al8n/crossbeam/actions) [![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam#license) -[![Cargo](https://img.shields.io/crates/v/crossbeam.svg)]( -https://crates.io/crates/crossbeam) -[![Documentation](https://docs.rs/crossbeam/badge.svg)]( -https://docs.rs/crossbeam) -[![Rust 1.61+](https://img.shields.io/badge/rust-1.61+-lightgray.svg)]( +https://github.com/al8n/crossbeam/tree/crossbeam-skiplist-fd/crossbeam-skiplist-fd#license) +[![Cargo](https://img.shields.io/crates/v/crossbeam-skiplist-fd.svg)]( +https://crates.io/crates/crossbeam-skiplist-fd) +[![Documentation](https://docs.rs/crossbeam-skiplist-fd/badge.svg)]( +https://docs.rs/crossbeam-skiplist-fd) +[![Rust 1.81+](https://img.shields.io/badge/rust-1.81+-lightgray.svg)]( https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) - -This crate provides a set of tools for concurrent programming: - -#### Atomics - -* [`AtomicCell`], a thread-safe mutable memory location.(no_std) -* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.(no_std) - -#### Data structures - -* [`deque`], work-stealing deques for building task schedulers. 
-* [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction.(alloc) -* [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand.(alloc) - -#### Memory management - -* [`epoch`], an epoch-based garbage collector.(alloc) - -#### Thread synchronization - -* [`channel`], multi-producer multi-consumer channels for message passing. -* [`Parker`], a thread parking primitive. -* [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. -* [`WaitGroup`], for synchronizing the beginning or end of some computation. - -#### Utilities - -* [`Backoff`], for exponential backoff in spin loops.(no_std) -* [`CachePadded`], for padding and aligning a value to the length of a cache line.(no_std) -* [`scope`], for spawning threads that borrow local variables from the stack. - -*Features marked with (no_std) can be used in `no_std` environments.*
-*Features marked with (alloc) can be used in `no_std` environments, but only if `alloc` -feature is enabled.* - -[`AtomicCell`]: https://docs.rs/crossbeam/latest/crossbeam/atomic/struct.AtomicCell.html -[`AtomicConsume`]: https://docs.rs/crossbeam/latest/crossbeam/atomic/trait.AtomicConsume.html -[`deque`]: https://docs.rs/crossbeam/latest/crossbeam/deque/index.html -[`ArrayQueue`]: https://docs.rs/crossbeam/latest/crossbeam/queue/struct.ArrayQueue.html -[`SegQueue`]: https://docs.rs/crossbeam/latest/crossbeam/queue/struct.SegQueue.html -[`channel`]: https://docs.rs/crossbeam/latest/crossbeam/channel/index.html -[`Parker`]: https://docs.rs/crossbeam/latest/crossbeam/sync/struct.Parker.html -[`ShardedLock`]: https://docs.rs/crossbeam/latest/crossbeam/sync/struct.ShardedLock.html -[`WaitGroup`]: https://docs.rs/crossbeam/latest/crossbeam/sync/struct.WaitGroup.html -[`epoch`]: https://docs.rs/crossbeam/latest/crossbeam/epoch/index.html -[`Backoff`]: https://docs.rs/crossbeam/latest/crossbeam/utils/struct.Backoff.html -[`CachePadded`]: https://docs.rs/crossbeam/latest/crossbeam/utils/struct.CachePadded.html -[`scope`]: https://docs.rs/crossbeam/latest/crossbeam/fn.scope.html - -## Crates -The main `crossbeam` crate just [re-exports](src/lib.rs) tools from -smaller subcrates: +This crate provides the types [`SkipMap`] and [`SkipSet`]. +These data structures provide an interface similar to `BTreeMap` and `BTreeSet`, +respectively, except they support safe concurrent access across multiple threads. -* [`crossbeam-channel`](crossbeam-channel) - provides multi-producer multi-consumer channels for message passing. -* [`crossbeam-deque`](crossbeam-deque) - provides work-stealing deques, which are primarily intended for building task schedulers. -* [`crossbeam-epoch`](crossbeam-epoch) - provides epoch-based garbage collection for building concurrent data structures. -* [`crossbeam-queue`](crossbeam-queue) - provides concurrent queues that can be shared among threads. 
-* [`crossbeam-utils`](crossbeam-utils) - provides atomics, synchronization primitives, scoped threads, and other utilities. +This crate can be used in `no_std` environments that implement `alloc`. The `alloc` feature of this crate needs to be enabled in `no_std` environments. -There is one more experimental subcrate that is not yet included in `crossbeam`: - -* [`crossbeam-skiplist`](crossbeam-skiplist) - provides concurrent maps and sets based on lock-free skip lists. +[`SkipMap`]: https://docs.rs/crossbeam-skiplist-fd/latest/crossbeam_skiplist_fd/struct.SkipMap.html +[`SkipSet`]: https://docs.rs/crossbeam-skiplist-fd/latest/crossbeam_skiplist_fd/struct.SkipSet.html ## Usage @@ -87,57 +28,14 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -crossbeam = "0.8" +crossbeam-skiplist-fd = "0.1" ``` ## Compatibility -Crossbeam supports stable Rust releases going back at least six months, +Crossbeam Skiplist supports stable Rust releases going back at least six months, and every time the minimum supported Rust version is increased, a new minor -version is released. Currently, the minimum supported Rust version is 1.61. - -## Contributing - -Crossbeam welcomes contribution from everyone in the form of suggestions, bug reports, -pull requests, and feedback. 💛 - -If you need ideas for contribution, there are several ways to get started: - -* Found a bug or have a feature request? - [Submit an issue](https://github.com/crossbeam-rs/crossbeam/issues/new)! -* Issues and PRs labeled with - [feedback wanted](https://github.com/crossbeam-rs/crossbeam/issues?utf8=%E2%9C%93&q=is%3Aopen+sort%3Aupdated-desc+label%3A%22feedback+wanted%22+) - need feedback from users and contributors. -* Issues labeled with - [good first issue](https://github.com/crossbeam-rs/crossbeam/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22good+first+issue%22) - are relatively easy starter issues. 
- -#### RFCs - -We also have the [RFCs](https://github.com/crossbeam-rs/rfcs) repository for more -high-level discussion, which is the place where we brainstorm ideas and propose -substantial changes to Crossbeam. - -You are welcome to participate in any open -[issues](https://github.com/crossbeam-rs/rfcs/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc) -or -[pull requests](https://github.com/crossbeam-rs/rfcs/pulls?q=is%3Apr+is%3Aopen+sort%3Aupdated-desc). - -#### Learning resources - -If you'd like to learn more about concurrency and non-blocking data structures, there's a -list of learning resources in our [wiki](https://github.com/crossbeam-rs/rfcs/wiki), -which includes relevant blog posts, papers, videos, and other similar projects. - -Another good place to visit is [merged RFCs](https://github.com/crossbeam-rs/rfcs/tree/master/text). -They contain elaborate descriptions and rationale for features we've introduced to -Crossbeam, but keep in mind that some of the written information is now out of date. - -#### Conduct - -The Crossbeam project adheres to the -[Rust Code of Conduct](https://www.rust-lang.org/policies/code-of-conduct). -This describes the minimum behavior expected from all contributors. +version is released. Currently, the minimum supported Rust version is 1.81. ## License @@ -148,9 +46,6 @@ Licensed under either of at your option. -Some Crossbeam subcrates have additional licensing notices. -Take a look at other readme files in this repository for more information. 
- #### Contribution Unless you explicitly state otherwise, any contribution intentionally submitted diff --git a/crossbeam-skiplist/benches/btree.rs b/benches/btree.rs similarity index 100% rename from crossbeam-skiplist/benches/btree.rs rename to benches/btree.rs diff --git a/crossbeam-skiplist/benches/hash.rs b/benches/hash.rs similarity index 100% rename from crossbeam-skiplist/benches/hash.rs rename to benches/hash.rs diff --git a/crossbeam-skiplist/benches/skiplist.rs b/benches/skiplist.rs similarity index 98% rename from crossbeam-skiplist/benches/skiplist.rs rename to benches/skiplist.rs index d6760c797..71b9acec4 100644 --- a/crossbeam-skiplist/benches/skiplist.rs +++ b/benches/skiplist.rs @@ -6,7 +6,7 @@ extern crate test; use test::{black_box, Bencher}; use crossbeam_epoch as epoch; -use crossbeam_skiplist::SkipList; +use crossbeam_skiplist_fd::SkipList; #[bench] fn insert(b: &mut Bencher) { diff --git a/crossbeam-skiplist/benches/skipmap.rs b/benches/skipmap.rs similarity index 97% rename from crossbeam-skiplist/benches/skipmap.rs rename to benches/skipmap.rs index 2e809423b..f931cb01a 100644 --- a/crossbeam-skiplist/benches/skipmap.rs +++ b/benches/skipmap.rs @@ -4,7 +4,7 @@ extern crate test; use test::{black_box, Bencher}; -use crossbeam_skiplist::SkipMap as Map; +use crossbeam_skiplist_fd::SkipMap as Map; #[bench] fn insert(b: &mut Bencher) { diff --git a/build-common.rs b/build-common.rs deleted file mode 100644 index e91bb4d47..000000000 --- a/build-common.rs +++ /dev/null @@ -1,13 +0,0 @@ -// The target triplets have the form of 'arch-vendor-system'. -// -// When building for Linux (e.g. the 'system' part is -// 'linux-something'), replace the vendor with 'unknown' -// so that mapping to rust standard targets happens correctly. 
-fn convert_custom_linux_target(target: String) -> String { - let mut parts: Vec<&str> = target.split('-').collect(); - let system = parts.get(2); - if system == Some(&"linux") { - parts[1] = "unknown"; - }; - parts.join("-") -} diff --git a/ci/check-features.sh b/ci/check-features.sh index 69476f692..1438b7a3c 100755 --- a/ci/check-features.sh +++ b/ci/check-features.sh @@ -7,23 +7,8 @@ cd "$(dirname "$0")"/.. # * `--no-dev-deps` - build without dev-dependencies to avoid https://github.com/rust-lang/cargo/issues/4866 # * `--exclude benchmarks` - benchmarks doesn't published. if [[ "${RUST_VERSION}" == "msrv" ]]; then - cargo hack build --all --feature-powerset --no-dev-deps --exclude crossbeam-utils --exclude benchmarks --rust-version - # atomic feature requires Rust 1.60. - cargo hack build -p crossbeam-utils --feature-powerset --no-dev-deps --rust-version --exclude-features atomic - cargo +1.60 hack build -p crossbeam-utils --feature-powerset --no-dev-deps + cargo hack build --all --feature-powerset --no-dev-deps --exclude benchmarks --rust-version else cargo hack build --all --feature-powerset --no-dev-deps --exclude benchmarks fi -if [[ "${RUST_VERSION}" == "nightly"* ]]; then - # Build for no_std environment. - # thumbv7m-none-eabi supports atomic CAS. - # thumbv6m-none-eabi supports atomic, but not atomic CAS. - # riscv32i-unknown-none-elf does not support atomic at all. 
- rustup target add thumbv7m-none-eabi - rustup target add thumbv6m-none-eabi - rustup target add riscv32i-unknown-none-elf - cargo hack build --all --feature-powerset --no-dev-deps --exclude benchmarks --target thumbv7m-none-eabi --skip std,default - cargo hack build --all --feature-powerset --no-dev-deps --exclude benchmarks --target thumbv6m-none-eabi --skip std,default - cargo hack build --all --feature-powerset --no-dev-deps --exclude benchmarks --target riscv32i-unknown-none-elf --skip std,default -fi diff --git a/ci/crossbeam-epoch-loom.sh b/ci/crossbeam-epoch-loom.sh deleted file mode 100755 index 59044ec03..000000000 --- a/ci/crossbeam-epoch-loom.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -euxo pipefail -IFS=$'\n\t' -cd "$(dirname "$0")"/../crossbeam-epoch - -export RUSTFLAGS="${RUSTFLAGS:-} --cfg crossbeam_loom --cfg crossbeam_sanitize" - -# With MAX_PREEMPTIONS=2 the loom tests (currently) take around 11m. -# If we were to run with =3, they would take several times that, -# which is probably too costly for CI. 
-env LOOM_MAX_PREEMPTIONS=2 cargo test --test loom --release --features loom -- --nocapture diff --git a/ci/miri.sh b/ci/miri.sh index fa6054821..1006c2164 100755 --- a/ci/miri.sh +++ b/ci/miri.sh @@ -14,32 +14,10 @@ export RUSTDOCFLAGS="${RUSTDOCFLAGS:-} -Z randomize-layout" export MIRIFLAGS="${MIRIFLAGS:-} -Zmiri-strict-provenance -Zmiri-symbolic-alignment-check -Zmiri-disable-isolation" case "${group}" in - channel) - MIRI_LEAK_CHECK='1' \ - cargo miri test --all-features \ - -p crossbeam-channel 2>&1 | ts -i '%.s ' - # -Zmiri-ignore-leaks is needed because we use detached threads in tests in tests/golang.rs: https://github.com/rust-lang/miri/issues/1371 - MIRIFLAGS="${MIRIFLAGS} -Zmiri-ignore-leaks" \ - cargo miri test --all-features \ - -p crossbeam-channel --test golang 2>&1 | ts -i '%.s ' - ;; others) - cargo miri test --all-features \ - -p crossbeam-queue \ - -p crossbeam-utils 2>&1 | ts -i '%.s ' # Use Tree Borrows instead of Stacked Borrows because epoch is not compatible with Stacked Borrows: https://github.com/crossbeam-rs/crossbeam/issues/545#issuecomment-1192785003 MIRIFLAGS="${MIRIFLAGS} -Zmiri-tree-borrows" \ - cargo miri test --all-features \ - -p crossbeam-epoch \ - -p crossbeam-skiplist \ - -p crossbeam 2>&1 | ts -i '%.s ' - # Use Tree Borrows instead of Stacked Borrows because epoch is not compatible with Stacked Borrows: https://github.com/crossbeam-rs/crossbeam/issues/545#issuecomment-1192785003 - # -Zmiri-compare-exchange-weak-failure-rate=0.0 is needed because some sequential tests (e.g., - # doctest of Stealer::steal) incorrectly assume that sequential weak CAS will never fail. - # -Zmiri-preemption-rate=0 is needed because this code technically has UB and Miri catches that. 
- MIRIFLAGS="${MIRIFLAGS} -Zmiri-tree-borrows -Zmiri-compare-exchange-weak-failure-rate=0.0 -Zmiri-preemption-rate=0" \ - cargo miri test --all-features \ - -p crossbeam-deque 2>&1 | ts -i '%.s ' + cargo miri test --all-features crossbeam-skiplist-fd 2>&1 | ts -i '%.s ' ;; *) echo "unknown crate group '${group}'" diff --git a/ci/no_atomic.sh b/ci/no_atomic.sh deleted file mode 100755 index 13ab417f3..000000000 --- a/ci/no_atomic.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -set -euo pipefail -IFS=$'\n\t' -cd "$(dirname "$0")"/.. - -# Update the list of targets that do not support atomic/CAS operations. -# -# Usage: -# ./ci/no_atomic.sh - -file=no_atomic.rs - -# `"max-atomic-width" == 0` means that atomic is not supported at all. -# We do not have a cfg for targets with {8,16}-bit atomic only, so -# for now we treat them the same as targets that do not support atomic. -# It is not clear exactly what `"max-atomic-width" == null` means, but they -# actually seem to have the same max-atomic-width as the target-pointer-width. -# The targets currently included in this group are "mipsel-sony-psp", -# "thumbv4t-none-eabi", "thumbv6m-none-eabi", all of which are -# `"target-pointer-width" == "32"`, so assuming them `"max-atomic-width" == 32` -# for now. -no_atomic=$(RUSTC_BOOTSTRAP=1 rustc +stable -Z unstable-options --print all-target-specs-json | jq -r '. | to_entries[] | select((.value."max-atomic-width" == 0) or (.value."min-atomic-width" and .value."min-atomic-width" != 8)) | " \"" + .key + "\","') - -cat >"${file}" <>()` smaller. -- Several minor optimizations. -- Add more tests. - -# Version 0.3.6 - -- Fix a bug in initialization of unbounded channels. - -# Version 0.3.5 - -- New implementation for unbounded channels. -- A number of small performance improvements. -- Remove `crossbeam-epoch` dependency. - -# Version 0.3.4 - -- Bump `crossbeam-epoch` to `0.7`. -- Improve documentation. - -# Version 0.3.3 - -- Relax the lifetime in `SelectedOperation<'_>`. 
-- Add `Select::try_ready()`, `Select::ready()`, and `Select::ready_timeout()`. -- Update licensing notices. -- Improve documentation. -- Add methods `is_disconnected()`, `is_timeout()`, `is_empty()`, and `is_full()` on error types. - -# Version 0.3.2 - -- More elaborate licensing notices. - -# Version 0.3.1 - -- Update `crossbeam-utils` to `0.6`. - -# Version 0.3.0 - -- Add a special `never` channel type. -- Dropping all receivers now closes the channel. -- The interface of sending and receiving methods is now very similar to those in v0.1. -- The syntax for `send` in `select!` is now `send(sender, msg) -> res => body`. -- The syntax for `recv` in `select!` is now `recv(receiver) -> res => body`. -- New, more efficient interface for `Select` without callbacks. -- Timeouts can be specified in `select!`. - -# Version 0.2.6 - -- `Select` struct that can add cases dynamically. -- More documentation (in particular, the FAQ section). -- Optimize contended sends/receives in unbounded channels. - -# Version 0.2.5 - -- Use `LocalKey::try_with` instead of `LocalKey::with`. -- Remove helper macros `__crossbeam_channel*`. - -# Version 0.2.4 - -- Make `select!` linearizable with other channel operations. -- Update `crossbeam-utils` to `0.5.0`. -- Update `parking_lot` to `0.6.3`. -- Remove Mac OS X tests. - -# Version 0.2.3 - -- Add Mac OS X tests. -- Lower some memory orderings. -- Eliminate calls to `mem::unitialized`, which caused bugs with ZST. - -# Version 0.2.2 - -- Add more tests. -- Update `crossbeam-epoch` to 0.5.0 -- Initialize the RNG seed to a random value. -- Replace `libc::abort` with `std::process::abort`. -- Ignore clippy warnings in `select!`. -- Better interaction of `select!` with the NLL borrow checker. - -# Version 0.2.1 - -- Fix compilation errors when using `select!` with `#[deny(unsafe_code)]`. - -# Version 0.2.0 - -- Implement `IntoIterator` for `Receiver`. -- Add a new `select!` macro. -- Add special channels `after` and `tick`. 
-- Dropping receivers doesn't close the channel anymore. -- Change the signature of `recv`, `send`, and `try_recv`. -- Remove `Sender::is_closed` and `Receiver::is_closed`. -- Remove `Sender::close` and `Receiver::close`. -- Remove `Sender::send_timeout` and `Receiver::recv_timeout`. -- Remove `Sender::try_send`. -- Remove `Select` and `select_loop!`. -- Remove all error types. -- Remove `Iter`, `TryIter`, and `IntoIter`. -- Remove the `nightly` feature. -- Remove ordering operators for `Sender` and `Receiver`. - -# Version 0.1.3 - -- Add `Sender::disconnect` and `Receiver::disconnect`. -- Implement comparison operators for `Sender` and `Receiver`. -- Allow arbitrary patterns in place of `msg` in `recv(r, msg)`. -- Add a few conversion impls between error types. -- Add benchmarks for `atomicring` and `mpmc`. -- Add benchmarks for different message sizes. -- Documentation improvements. -- Update `crossbeam-epoch` to 0.4.0 -- Update `crossbeam-utils` to 0.3.0 -- Update `parking_lot` to 0.5 -- Update `rand` to 0.4 - -# Version 0.1.2 - -- Allow conditional cases in `select_loop!` macro. -- Fix typos in documentation. -- Fix deadlock in selection when all channels are disconnected and a timeout is specified. - -# Version 0.1.1 - -- Implement `Debug` for `Sender`, `Receiver`, `Iter`, `TryIter`, `IntoIter`, and `Select`. -- Implement `Default` for `Select`. - -# Version 0.1.0 - -- First implementation of the channels. -- Add `select_loop!` macro by @TimNN. 
diff --git a/crossbeam-channel/Cargo.toml b/crossbeam-channel/Cargo.toml deleted file mode 100644 index d03293956..000000000 --- a/crossbeam-channel/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "crossbeam-channel" -# When publishing a new version: -# - Update CHANGELOG.md -# - Update README.md (when increasing major or minor version) -# - Run './tools/publish.sh crossbeam-channel ' -version = "0.5.14" -edition = "2021" -rust-version = "1.60" -license = "MIT OR Apache-2.0" -repository = "https://github.com/crossbeam-rs/crossbeam" -homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-channel" -description = "Multi-producer multi-consumer channels for message passing" -keywords = ["channel", "mpmc", "select", "golang", "message"] -categories = ["algorithms", "concurrency", "data-structures"] - -[features] -default = ["std"] - -# Enable to use APIs that require `std`. -# This is enabled by default. -# -# NOTE: Disabling `std` feature is not supported yet. -std = ["crossbeam-utils/std"] - -[dependencies] -crossbeam-utils = { version = "0.8.18", path = "../crossbeam-utils", default-features = false, features = ["atomic"] } - -[dev-dependencies] -num_cpus = "1.13.0" -rand = "0.8" -signal-hook = "0.3" - -[lints] -workspace = true diff --git a/crossbeam-channel/LICENSE-APACHE b/crossbeam-channel/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/crossbeam-channel/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/crossbeam-channel/LICENSE-MIT b/crossbeam-channel/LICENSE-MIT deleted file mode 100644 index 068d491fd..000000000 --- a/crossbeam-channel/LICENSE-MIT +++ /dev/null @@ -1,27 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 The Crossbeam Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. diff --git a/crossbeam-channel/LICENSE-THIRD-PARTY b/crossbeam-channel/LICENSE-THIRD-PARTY deleted file mode 100644 index ed4df76f4..000000000 --- a/crossbeam-channel/LICENSE-THIRD-PARTY +++ /dev/null @@ -1,593 +0,0 @@ -=============================================================================== - -matching.go -https://creativecommons.org/licenses/by/3.0/legalcode - -Creative Commons Legal Code - -Attribution 3.0 Unported - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. 
CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR - DAMAGES RESULTING FROM ITS USE. - -License - -THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE -COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY -COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS -AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED. - -BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE -TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY -BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS -CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND -CONDITIONS. - -1. Definitions - - a. "Adaptation" means a work based upon the Work, or upon the Work and - other pre-existing works, such as a translation, adaptation, - derivative work, arrangement of music or other alterations of a - literary or artistic work, or phonogram or performance and includes - cinematographic adaptations or any other form in which the Work may be - recast, transformed, or adapted including in any form recognizably - derived from the original, except that a work that constitutes a - Collection will not be considered an Adaptation for the purpose of - this License. For the avoidance of doubt, where the Work is a musical - work, performance or phonogram, the synchronization of the Work in - timed-relation with a moving image ("synching") will be considered an - Adaptation for the purpose of this License. - b. 
"Collection" means a collection of literary or artistic works, such as - encyclopedias and anthologies, or performances, phonograms or - broadcasts, or other works or subject matter other than works listed - in Section 1(f) below, which, by reason of the selection and - arrangement of their contents, constitute intellectual creations, in - which the Work is included in its entirety in unmodified form along - with one or more other contributions, each constituting separate and - independent works in themselves, which together are assembled into a - collective whole. A work that constitutes a Collection will not be - considered an Adaptation (as defined above) for the purposes of this - License. - c. "Distribute" means to make available to the public the original and - copies of the Work or Adaptation, as appropriate, through sale or - other transfer of ownership. - d. "Licensor" means the individual, individuals, entity or entities that - offer(s) the Work under the terms of this License. - e. "Original Author" means, in the case of a literary or artistic work, - the individual, individuals, entity or entities who created the Work - or if no individual or entity can be identified, the publisher; and in - addition (i) in the case of a performance the actors, singers, - musicians, dancers, and other persons who act, sing, deliver, declaim, - play in, interpret or otherwise perform literary or artistic works or - expressions of folklore; (ii) in the case of a phonogram the producer - being the person or legal entity who first fixes the sounds of a - performance or other sounds; and, (iii) in the case of broadcasts, the - organization that transmits the broadcast. - f. 
"Work" means the literary and/or artistic work offered under the terms - of this License including without limitation any production in the - literary, scientific and artistic domain, whatever may be the mode or - form of its expression including digital form, such as a book, - pamphlet and other writing; a lecture, address, sermon or other work - of the same nature; a dramatic or dramatico-musical work; a - choreographic work or entertainment in dumb show; a musical - composition with or without words; a cinematographic work to which are - assimilated works expressed by a process analogous to cinematography; - a work of drawing, painting, architecture, sculpture, engraving or - lithography; a photographic work to which are assimilated works - expressed by a process analogous to photography; a work of applied - art; an illustration, map, plan, sketch or three-dimensional work - relative to geography, topography, architecture or science; a - performance; a broadcast; a phonogram; a compilation of data to the - extent it is protected as a copyrightable work; or a work performed by - a variety or circus performer to the extent it is not otherwise - considered a literary or artistic work. - g. "You" means an individual or entity exercising rights under this - License who has not previously violated the terms of this License with - respect to the Work, or who has received express permission from the - Licensor to exercise rights under this License despite a previous - violation. - h. 
"Publicly Perform" means to perform public recitations of the Work and - to communicate to the public those public recitations, by any means or - process, including by wire or wireless means or public digital - performances; to make available to the public Works in such a way that - members of the public may access these Works from a place and at a - place individually chosen by them; to perform the Work to the public - by any means or process and the communication to the public of the - performances of the Work, including by public digital performance; to - broadcast and rebroadcast the Work by any means including signs, - sounds or images. - i. "Reproduce" means to make copies of the Work by any means including - without limitation by sound or visual recordings and the right of - fixation and reproducing fixations of the Work, including storage of a - protected performance or phonogram in digital form or other electronic - medium. - -2. Fair Dealing Rights. Nothing in this License is intended to reduce, -limit, or restrict any uses free from copyright or rights arising from -limitations or exceptions that are provided for in connection with the -copyright protection under copyright law or other applicable laws. - -3. License Grant. Subject to the terms and conditions of this License, -Licensor hereby grants You a worldwide, royalty-free, non-exclusive, -perpetual (for the duration of the applicable copyright) license to -exercise the rights in the Work as stated below: - - a. to Reproduce the Work, to incorporate the Work into one or more - Collections, and to Reproduce the Work as incorporated in the - Collections; - b. to create and Reproduce Adaptations provided that any such Adaptation, - including any translation in any medium, takes reasonable steps to - clearly label, demarcate or otherwise identify that changes were made - to the original Work. 
For example, a translation could be marked "The - original work was translated from English to Spanish," or a - modification could indicate "The original work has been modified."; - c. to Distribute and Publicly Perform the Work including as incorporated - in Collections; and, - d. to Distribute and Publicly Perform Adaptations. - e. For the avoidance of doubt: - - i. Non-waivable Compulsory License Schemes. In those jurisdictions in - which the right to collect royalties through any statutory or - compulsory licensing scheme cannot be waived, the Licensor - reserves the exclusive right to collect such royalties for any - exercise by You of the rights granted under this License; - ii. Waivable Compulsory License Schemes. In those jurisdictions in - which the right to collect royalties through any statutory or - compulsory licensing scheme can be waived, the Licensor waives the - exclusive right to collect such royalties for any exercise by You - of the rights granted under this License; and, - iii. Voluntary License Schemes. The Licensor waives the right to - collect royalties, whether individually or, in the event that the - Licensor is a member of a collecting society that administers - voluntary licensing schemes, via that society, from any exercise - by You of the rights granted under this License. - -The above rights may be exercised in all media and formats whether now -known or hereafter devised. The above rights include the right to make -such modifications as are technically necessary to exercise the rights in -other media and formats. Subject to Section 8(f), all rights not expressly -granted by Licensor are hereby reserved. - -4. Restrictions. The license granted in Section 3 above is expressly made -subject to and limited by the following restrictions: - - a. You may Distribute or Publicly Perform the Work only under the terms - of this License. 
You must include a copy of, or the Uniform Resource - Identifier (URI) for, this License with every copy of the Work You - Distribute or Publicly Perform. You may not offer or impose any terms - on the Work that restrict the terms of this License or the ability of - the recipient of the Work to exercise the rights granted to that - recipient under the terms of the License. You may not sublicense the - Work. You must keep intact all notices that refer to this License and - to the disclaimer of warranties with every copy of the Work You - Distribute or Publicly Perform. When You Distribute or Publicly - Perform the Work, You may not impose any effective technological - measures on the Work that restrict the ability of a recipient of the - Work from You to exercise the rights granted to that recipient under - the terms of the License. This Section 4(a) applies to the Work as - incorporated in a Collection, but this does not require the Collection - apart from the Work itself to be made subject to the terms of this - License. If You create a Collection, upon notice from any Licensor You - must, to the extent practicable, remove from the Collection any credit - as required by Section 4(b), as requested. If You create an - Adaptation, upon notice from any Licensor You must, to the extent - practicable, remove from the Adaptation any credit as required by - Section 4(b), as requested. - b. 
If You Distribute, or Publicly Perform the Work or any Adaptations or - Collections, You must, unless a request has been made pursuant to - Section 4(a), keep intact all copyright notices for the Work and - provide, reasonable to the medium or means You are utilizing: (i) the - name of the Original Author (or pseudonym, if applicable) if supplied, - and/or if the Original Author and/or Licensor designate another party - or parties (e.g., a sponsor institute, publishing entity, journal) for - attribution ("Attribution Parties") in Licensor's copyright notice, - terms of service or by other reasonable means, the name of such party - or parties; (ii) the title of the Work if supplied; (iii) to the - extent reasonably practicable, the URI, if any, that Licensor - specifies to be associated with the Work, unless such URI does not - refer to the copyright notice or licensing information for the Work; - and (iv) , consistent with Section 3(b), in the case of an Adaptation, - a credit identifying the use of the Work in the Adaptation (e.g., - "French translation of the Work by Original Author," or "Screenplay - based on original Work by Original Author"). The credit required by - this Section 4 (b) may be implemented in any reasonable manner; - provided, however, that in the case of a Adaptation or Collection, at - a minimum such credit will appear, if a credit for all contributing - authors of the Adaptation or Collection appears, then as part of these - credits and in a manner at least as prominent as the credits for the - other contributing authors. 
For the avoidance of doubt, You may only - use the credit required by this Section for the purpose of attribution - in the manner set out above and, by exercising Your rights under this - License, You may not implicitly or explicitly assert or imply any - connection with, sponsorship or endorsement by the Original Author, - Licensor and/or Attribution Parties, as appropriate, of You or Your - use of the Work, without the separate, express prior written - permission of the Original Author, Licensor and/or Attribution - Parties. - c. Except as otherwise agreed in writing by the Licensor or as may be - otherwise permitted by applicable law, if You Reproduce, Distribute or - Publicly Perform the Work either by itself or as part of any - Adaptations or Collections, You must not distort, mutilate, modify or - take other derogatory action in relation to the Work which would be - prejudicial to the Original Author's honor or reputation. Licensor - agrees that in those jurisdictions (e.g. Japan), in which any exercise - of the right granted in Section 3(b) of this License (the right to - make Adaptations) would be deemed to be a distortion, mutilation, - modification or other derogatory action prejudicial to the Original - Author's honor and reputation, the Licensor will waive or not assert, - as appropriate, this Section, to the fullest extent permitted by the - applicable national law, to enable You to reasonably exercise Your - right under Section 3(b) of this License (right to make Adaptations) - but not otherwise. - -5. 
Representations, Warranties and Disclaimer - -UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR -OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY -KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, -INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, -FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF -LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, -WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION -OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU. - -6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE -LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR -ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES -ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS -BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -7. Termination - - a. This License and the rights granted hereunder will terminate - automatically upon any breach by You of the terms of this License. - Individuals or entities who have received Adaptations or Collections - from You under this License, however, will not have their licenses - terminated provided such individuals or entities remain in full - compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will - survive any termination of this License. - b. Subject to the above terms and conditions, the license granted here is - perpetual (for the duration of the applicable copyright in the Work). 
- Notwithstanding the above, Licensor reserves the right to release the - Work under different license terms or to stop distributing the Work at - any time; provided, however that any such election will not serve to - withdraw this License (or any other license that has been, or is - required to be, granted under the terms of this License), and this - License will continue in full force and effect unless terminated as - stated above. - -8. Miscellaneous - - a. Each time You Distribute or Publicly Perform the Work or a Collection, - the Licensor offers to the recipient a license to the Work on the same - terms and conditions as the license granted to You under this License. - b. Each time You Distribute or Publicly Perform an Adaptation, Licensor - offers to the recipient a license to the original Work on the same - terms and conditions as the license granted to You under this License. - c. If any provision of this License is invalid or unenforceable under - applicable law, it shall not affect the validity or enforceability of - the remainder of the terms of this License, and without further action - by the parties to this agreement, such provision shall be reformed to - the minimum extent necessary to make such provision valid and - enforceable. - d. No term or provision of this License shall be deemed waived and no - breach consented to unless such waiver or consent shall be in writing - and signed by the party to be charged with such waiver or consent. - e. This License constitutes the entire agreement between the parties with - respect to the Work licensed here. There are no understandings, - agreements or representations with respect to the Work not specified - here. Licensor shall not be bound by any additional provisions that - may appear in any communication from You. This License may not be - modified without the mutual written agreement of the Licensor and You. - f. 
The rights granted under, and the subject matter referenced, in this - License were drafted utilizing the terminology of the Berne Convention - for the Protection of Literary and Artistic Works (as amended on - September 28, 1979), the Rome Convention of 1961, the WIPO Copyright - Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 - and the Universal Copyright Convention (as revised on July 24, 1971). - These rights and subject matter take effect in the relevant - jurisdiction in which the License terms are sought to be enforced - according to the corresponding provisions of the implementation of - those treaty provisions in the applicable national law. If the - standard suite of rights granted under applicable copyright law - includes additional rights not granted under this License, such - additional rights are deemed to be included in the License; this - License is not intended to restrict the license of any rights under - applicable law. - - -Creative Commons Notice - - Creative Commons is not a party to this License, and makes no warranty - whatsoever in connection with the Work. Creative Commons will not be - liable to You or any party on any legal theory for any damages - whatsoever, including without limitation any general, special, - incidental or consequential damages arising in connection to this - license. Notwithstanding the foregoing two (2) sentences, if Creative - Commons has expressly identified itself as the Licensor hereunder, it - shall have all rights and obligations of Licensor. - - Except for the limited purpose of indicating to the public that the - Work is licensed under the CCPL, Creative Commons does not authorize - the use by either party of the trademark "Creative Commons" or any - related trademark or logo of Creative Commons without the prior - written consent of Creative Commons. 
Any permitted use will be in - compliance with Creative Commons' then-current trademark usage - guidelines, as may be published on its website or otherwise made - available upon request from time to time. For the avoidance of doubt, - this trademark restriction does not form part of this License. - - Creative Commons may be contacted at https://creativecommons.org/. - -=============================================================================== - -The Go Programming Language -https://golang.org/LICENSE - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -=============================================================================== - -The Rust Programming Language -https://github.com/rust-lang/rust/blob/master/LICENSE-MIT - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
- -=============================================================================== - -The Rust Programming Language -https://github.com/rust-lang/rust/blob/master/LICENSE-APACHE - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, 
in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/crossbeam-channel/README.md b/crossbeam-channel/README.md deleted file mode 100644 index 4904d1ab5..000000000 --- a/crossbeam-channel/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# Crossbeam Channel - -[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( -https://github.com/crossbeam-rs/crossbeam/actions) -[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-channel#license) -[![Cargo](https://img.shields.io/crates/v/crossbeam-channel.svg)]( -https://crates.io/crates/crossbeam-channel) -[![Documentation](https://docs.rs/crossbeam-channel/badge.svg)]( -https://docs.rs/crossbeam-channel) -[![Rust 1.60+](https://img.shields.io/badge/rust-1.60+-lightgray.svg)]( -https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) - -This crate provides multi-producer multi-consumer channels for message passing. -It is an alternative to [`std::sync::mpsc`] with more features and better performance. - -Some highlights: - -* [`Sender`]s and [`Receiver`]s can be cloned and shared among threads. -* Two main kinds of channels are [`bounded`] and [`unbounded`]. -* Convenient extra channels like [`after`], [`never`], and [`tick`]. -* The [`select!`] macro can block on multiple channel operations. -* [`Select`] can select over a dynamically built list of channel operations. -* Channels use locks very sparingly for maximum [performance](benchmarks). 
- -[`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html -[`Sender`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/struct.Sender.html -[`Receiver`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/struct.Receiver.html -[`bounded`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/fn.bounded.html -[`unbounded`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/fn.unbounded.html -[`after`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/fn.after.html -[`never`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/fn.never.html -[`tick`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/fn.tick.html -[`select!`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/macro.select.html -[`Select`]: https://docs.rs/crossbeam-channel/latest/crossbeam_channel/struct.Select.html - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -crossbeam-channel = "0.5" -``` - -## Compatibility - -Crossbeam Channel supports stable Rust releases going back at least six months, -and every time the minimum supported Rust version is increased, a new minor -version is released. Currently, the minimum supported Rust version is 1.60. - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -#### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. 
- -#### Third party software - -This product includes copies and modifications of software developed by third parties: - -* [examples/matching.rs](examples/matching.rs) includes - [matching.go](http://www.nada.kth.se/~snilsson/concurrency/src/matching.go) by Stefan Nilsson, - licensed under Creative Commons Attribution 3.0 Unported License. - -* [tests/mpsc.rs](tests/mpsc.rs) includes modifications of code from The Rust Programming Language, - licensed under the MIT License and the Apache License, Version 2.0. - -* [tests/golang.rs](tests/golang.rs) is based on code from The Go Programming Language, licensed - under the 3-Clause BSD License. - -See the source code files for more details. - -Copies of third party licenses can be found in [LICENSE-THIRD-PARTY](LICENSE-THIRD-PARTY). diff --git a/crossbeam-channel/benches/crossbeam.rs b/crossbeam-channel/benches/crossbeam.rs deleted file mode 100644 index 1c0522294..000000000 --- a/crossbeam-channel/benches/crossbeam.rs +++ /dev/null @@ -1,712 +0,0 @@ -#![feature(test)] - -extern crate test; - -use crossbeam_channel::{bounded, unbounded}; -use crossbeam_utils::thread::scope; -use test::Bencher; - -const TOTAL_STEPS: usize = 40_000; - -mod unbounded { - use super::*; - - #[bench] - fn create(b: &mut Bencher) { - b.iter(unbounded::); - } - - #[bench] - fn oneshot(b: &mut Bencher) { - b.iter(|| { - let (s, r) = unbounded::(); - s.send(0).unwrap(); - r.recv().unwrap(); - }); - } - - #[bench] - fn inout(b: &mut Bencher) { - let (s, r) = unbounded::(); - b.iter(|| { - s.send(0).unwrap(); - r.recv().unwrap(); - }); - } - - #[bench] - fn par_inout(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - 
b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = unbounded::(); - - let (s1, r1) = bounded(0); - 
let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} - -mod bounded_n { - use super::*; - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = bounded::(steps); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(steps * threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(steps * threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as 
i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn par_inout(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - assert_eq!(threads % 2, 0); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(steps * threads); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} - -mod bounded_1 { - use super::*; - - #[bench] - fn create(b: &mut Bencher) { - b.iter(|| bounded::(1)); - } - - #[bench] - fn oneshot(b: &mut Bencher) { - b.iter(|| { - let (s, r) = bounded::(1); - s.send(0).unwrap(); - r.recv().unwrap(); - }); - } - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = 
bounded(0); - scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(1); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 
0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} - -mod bounded_0 { - use super::*; - - #[bench] - fn create(b: &mut Bencher) { - b.iter(|| bounded::(0)); - } - - #[bench] - fn spsc(b: &mut Bencher) { - let steps = TOTAL_STEPS; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - - b.iter(|| { - s1.send(()).unwrap(); - for _ in 0..steps { - r.recv().unwrap(); - } - r2.recv().unwrap(); - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn spmc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for i in 0..steps * threads { - s.send(i as i32).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } - - #[bench] - fn mpsc(b: &mut Bencher) { - let threads = num_cpus::get() - 1; - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..steps * threads { - r.recv().unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - 
.unwrap(); - } - - #[bench] - fn mpmc(b: &mut Bencher) { - let threads = num_cpus::get(); - let steps = TOTAL_STEPS / threads; - let (s, r) = bounded::(0); - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - scope(|scope| { - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for i in 0..steps { - s.send(i as i32).unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - for _ in 0..threads / 2 { - scope.spawn(|_| { - while r1.recv().is_ok() { - for _ in 0..steps { - r.recv().unwrap(); - } - s2.send(()).unwrap(); - } - }); - } - - b.iter(|| { - for _ in 0..threads { - s1.send(()).unwrap(); - } - for _ in 0..threads { - r2.recv().unwrap(); - } - }); - drop(s1); - }) - .unwrap(); - } -} diff --git a/crossbeam-channel/benchmarks/Cargo.toml b/crossbeam-channel/benchmarks/Cargo.toml deleted file mode 100644 index 7b334b78c..000000000 --- a/crossbeam-channel/benchmarks/Cargo.toml +++ /dev/null @@ -1,74 +0,0 @@ -[package] -name = "benchmarks" -version = "0.0.0" -edition = "2021" -publish = false - -[dependencies] -atomicring = "1.2.9" -bus = "2.3.0" -crossbeam = { path = "../.." } -crossbeam-channel = { path = ".." 
} -crossbeam-deque = { path = "../../crossbeam-deque" } -flume = "0.11" -futures = { version = "0.3", features = ["thread-pool"] } -lockfree = "0.5.1" -mpmc = "0.1.6" - -[[bin]] -name = "atomicring" -path = "atomicring.rs" -doc = false - -[[bin]] -name = "atomicringqueue" -path = "atomicringqueue.rs" -doc = false - -[[bin]] -name = "bus" -path = "bus.rs" -doc = false - -[[bin]] -name = "crossbeam-channel" -path = "crossbeam-channel.rs" -doc = false - -[[bin]] -name = "crossbeam-deque" -path = "crossbeam-deque.rs" -doc = false - -[[bin]] -name = "flume" -path = "flume.rs" -doc = false - -[[bin]] -name = "futures-channel" -path = "futures-channel.rs" -doc = false - -[[bin]] -name = "lockfree" -path = "lockfree.rs" -doc = false - -[[bin]] -name = "mpsc" -path = "mpsc.rs" -doc = false - -[[bin]] -name = "segqueue" -path = "segqueue.rs" -doc = false - -[[bin]] -name = "mpmc" -path = "mpmc.rs" -doc = false - -[lints] -workspace = true diff --git a/crossbeam-channel/benchmarks/README.md b/crossbeam-channel/benchmarks/README.md deleted file mode 100644 index c558fab62..000000000 --- a/crossbeam-channel/benchmarks/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# Benchmarks - -### Tests - -* `seq`: A single thread sends `N` messages. Then it receives `N` messages. -* `spsc`: One thread sends `N` messages. Another thread receives `N` messages. -* `mpsc`: `T` threads send `N / T` messages each. One thread receives `N` messages. -* `mpmc`: `T` threads send `N / T` messages each. `T` other threads receive `N / T` messages each. -* `select_rx`: `T` threads send `N / T` messages each into a separate channel. Another thread receives `N` messages by selecting over the `T` channels. -* `select_both`: `T` threads send `N / T` messages each by selecting over `T` channels. `T` other threads receive `N / T` messages each by selecting over the `T` channels. 
- -Default configuration: - -- `N = 5000000` -- `T = 4` - -### Running - -Runs benchmarks, stores results into `*.txt` files, and generates `plot.png`: - -``` -./run.sh -``` - -Dependencies: - -- Rust -- Go -- Bash -- Python -- Matplotlib - -### Results - -Machine: Intel(R) Core(TM) i7-5500U (2 physical cores, 4 logical cores) - -Rust: `rustc 1.63.0 (4b91a6ea7 2022-08-08)` - -Go: `go version go1.19 linux/amd64` - -Commit: `7070018` (2022-08-24) - -![Benchmark results](plot.png) diff --git a/crossbeam-channel/benchmarks/atomicring.rs b/crossbeam-channel/benchmarks/atomicring.rs deleted file mode 100644 index 78cd6fcc8..000000000 --- a/crossbeam-channel/benchmarks/atomicring.rs +++ /dev/null @@ -1,140 +0,0 @@ -use atomicring::AtomicRingBuffer; -use std::thread; - -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -fn seq(cap: usize) { - let q = AtomicRingBuffer::with_capacity(cap); - - for i in 0..MESSAGES { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - - for _ in 0..MESSAGES { - q.try_pop().unwrap(); - } -} - -fn spsc(cap: usize) { - let q = AtomicRingBuffer::with_capacity(cap); - - crossbeam::scope(|scope| { - scope.spawn(|_| { - for i in 0..MESSAGES { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - - for _ in 0..MESSAGES { - loop { - if q.try_pop().is_none() { - thread::yield_now(); - } else { - break; - } - } - } - }) - .unwrap(); -} - -fn mpsc(cap: usize) { - let q = AtomicRingBuffer::with_capacity(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - } - - for _ in 0..MESSAGES { - loop { - if q.try_pop().is_none() { - thread::yield_now(); - } else { - break; - } - } - } - }) - .unwrap(); -} - -fn mpmc(cap: usize) { - let q = 
AtomicRingBuffer::with_capacity(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..MESSAGES / THREADS { - loop { - if q.try_pop().is_none() { - thread::yield_now(); - } else { - break; - } - } - } - }); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust atomicring", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("bounded_mpmc", mpmc(MESSAGES)); - run!("bounded_mpsc", mpsc(MESSAGES)); - run!("bounded_seq", seq(MESSAGES)); - run!("bounded_spsc", spsc(MESSAGES)); -} diff --git a/crossbeam-channel/benchmarks/atomicringqueue.rs b/crossbeam-channel/benchmarks/atomicringqueue.rs deleted file mode 100644 index f0b97727a..000000000 --- a/crossbeam-channel/benchmarks/atomicringqueue.rs +++ /dev/null @@ -1,123 +0,0 @@ -use atomicring::AtomicRingQueue; -use std::thread; - -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -fn seq(cap: usize) { - let q = AtomicRingQueue::with_capacity(cap); - - for i in 0..MESSAGES { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - - for _ in 0..MESSAGES { - q.pop(); - } -} - -fn spsc(cap: usize) { - let q = AtomicRingQueue::with_capacity(cap); - - crossbeam::scope(|scope| { - scope.spawn(|_| { - for i in 0..MESSAGES { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - - for _ in 0..MESSAGES { - q.pop(); - } - }) - .unwrap(); -} - -fn mpsc(cap: usize) { - let q = AtomicRingQueue::with_capacity(cap); - - crossbeam::scope(|scope| { - for _ in 
0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - } - - for _ in 0..MESSAGES { - q.pop(); - } - }) - .unwrap(); -} - -fn mpmc(cap: usize) { - let q = AtomicRingQueue::with_capacity(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - loop { - if q.try_push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - } - - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..MESSAGES / THREADS { - q.pop(); - } - }); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust atomicringqueue", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("bounded_mpmc", mpmc(MESSAGES)); - run!("bounded_mpsc", mpsc(MESSAGES)); - run!("bounded_seq", seq(MESSAGES)); - run!("bounded_spsc", spsc(MESSAGES)); -} diff --git a/crossbeam-channel/benchmarks/bus.rs b/crossbeam-channel/benchmarks/bus.rs deleted file mode 100644 index 754f32db8..000000000 --- a/crossbeam-channel/benchmarks/bus.rs +++ /dev/null @@ -1,57 +0,0 @@ -use bus::Bus; - -mod message; - -const MESSAGES: usize = 5_000_000; - -fn seq(cap: usize) { - let mut tx = Bus::new(cap); - let mut rx = tx.add_rx(); - - for i in 0..MESSAGES { - tx.broadcast(message::new(i)); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } -} - -fn spsc(cap: usize) { - let mut tx = Bus::new(cap); - let mut rx = tx.add_rx(); - - crossbeam::scope(|scope| { - scope.spawn(|_| { - for i in 0..MESSAGES { - tx.broadcast(message::new(i)); - } - }); - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! 
run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust bus", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("bounded1_spsc", spsc(1)); - - run!("bounded_seq", seq(MESSAGES)); - run!("bounded_spsc", spsc(MESSAGES)); -} diff --git a/crossbeam-channel/benchmarks/crossbeam-channel.rs b/crossbeam-channel/benchmarks/crossbeam-channel.rs deleted file mode 100644 index 506e1b230..000000000 --- a/crossbeam-channel/benchmarks/crossbeam-channel.rs +++ /dev/null @@ -1,187 +0,0 @@ -use crossbeam_channel::{bounded, unbounded, Receiver, Select, Sender}; - -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -fn new(cap: Option) -> (Sender, Receiver) { - match cap { - None => unbounded(), - Some(cap) => bounded(cap), - } -} - -fn seq(cap: Option) { - let (tx, rx) = new(cap); - - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } -} - -fn spsc(cap: Option) { - let (tx, rx) = new(cap); - - crossbeam::scope(|scope| { - scope.spawn(|_| { - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - }); - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn mpsc(cap: Option) { - let (tx, rx) = new(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn mpmc(cap: Option) { - let (tx, rx) = new(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..MESSAGES / THREADS { - rx.recv().unwrap(); - } - }); - } - }) - .unwrap(); -} - -fn select_rx(cap: Option) { 
- let chans = (0..THREADS).map(|_| new(cap)).collect::>(); - - crossbeam::scope(|scope| { - for (tx, _) in &chans { - let tx = tx.clone(); - scope.spawn(move |_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..MESSAGES { - let mut sel = Select::new(); - for (_, rx) in &chans { - sel.recv(rx); - } - let case = sel.select(); - let index = case.index(); - case.recv(&chans[index].1).unwrap(); - } - }) - .unwrap(); -} - -fn select_both(cap: Option) { - let chans = (0..THREADS).map(|_| new(cap)).collect::>(); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - let mut sel = Select::new(); - for (tx, _) in &chans { - sel.send(tx); - } - let case = sel.select(); - let index = case.index(); - case.send(&chans[index].0, message::new(i)).unwrap(); - } - }); - } - - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..MESSAGES / THREADS { - let mut sel = Select::new(); - for (_, rx) in &chans { - sel.recv(rx); - } - let case = sel.select(); - let index = case.index(); - case.recv(&chans[index].1).unwrap(); - } - }); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! 
run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust crossbeam-channel", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("bounded0_mpmc", mpmc(Some(0))); - run!("bounded0_mpsc", mpsc(Some(0))); - run!("bounded0_select_both", select_both(Some(0))); - run!("bounded0_select_rx", select_rx(Some(0))); - run!("bounded0_spsc", spsc(Some(0))); - - run!("bounded1_mpmc", mpmc(Some(1))); - run!("bounded1_mpsc", mpsc(Some(1))); - run!("bounded1_select_both", select_both(Some(1))); - run!("bounded1_select_rx", select_rx(Some(1))); - run!("bounded1_spsc", spsc(Some(1))); - - run!("bounded_mpmc", mpmc(Some(MESSAGES))); - run!("bounded_mpsc", mpsc(Some(MESSAGES))); - run!("bounded_select_both", select_both(Some(MESSAGES))); - run!("bounded_select_rx", select_rx(Some(MESSAGES))); - run!("bounded_seq", seq(Some(MESSAGES))); - run!("bounded_spsc", spsc(Some(MESSAGES))); - - run!("unbounded_mpmc", mpmc(None)); - run!("unbounded_mpsc", mpsc(None)); - run!("unbounded_select_both", select_both(None)); - run!("unbounded_select_rx", select_rx(None)); - run!("unbounded_seq", seq(None)); - run!("unbounded_spsc", spsc(None)); -} diff --git a/crossbeam-channel/benchmarks/crossbeam-deque.rs b/crossbeam-channel/benchmarks/crossbeam-deque.rs deleted file mode 100644 index 935c1fb77..000000000 --- a/crossbeam-channel/benchmarks/crossbeam-deque.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crossbeam_deque::{Steal, Worker}; -use std::thread; - -mod message; - -const MESSAGES: usize = 5_000_000; - -fn seq() { - let tx = Worker::new_lifo(); - let rx = tx.stealer(); - - for i in 0..MESSAGES { - tx.push(message::new(i)); - } - - for _ in 0..MESSAGES { - match rx.steal() { - Steal::Success(_) => {} - Steal::Retry => panic!(), - Steal::Empty => panic!(), - } - } -} - -fn spsc() { - let tx = Worker::new_lifo(); - let rx = tx.stealer(); - - 
crossbeam::scope(|scope| { - scope.spawn(move |_| { - for i in 0..MESSAGES { - tx.push(message::new(i)); - } - }); - - scope.spawn(move |_| { - for _ in 0..MESSAGES { - loop { - match rx.steal() { - Steal::Success(_) => break, - Steal::Retry | Steal::Empty => thread::yield_now(), - } - } - } - }); - }) - .unwrap(); -} - -fn main() { - macro_rules! run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust crossbeam-deque", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("unbounded_seq", seq()); - run!("unbounded_spsc", spsc()); -} diff --git a/crossbeam-channel/benchmarks/flume.rs b/crossbeam-channel/benchmarks/flume.rs deleted file mode 100644 index 507942a17..000000000 --- a/crossbeam-channel/benchmarks/flume.rs +++ /dev/null @@ -1,167 +0,0 @@ -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -pub fn shuffle(v: &mut [T]) { - use std::cell::Cell; - use std::num::Wrapping; - - let len = v.len(); - if len <= 1 { - return; - } - - thread_local! { - static RNG: Cell> = const { Cell::new(Wrapping(1)) }; - } - - RNG.with(|rng| { - for i in 1..len { - // This is the 32-bit variant of Xorshift. - // https://en.wikipedia.org/wiki/Xorshift - let mut x = rng.get(); - x ^= x << 13; - x ^= x >> 17; - x ^= x << 5; - rng.set(x); - - let x = x.0; - let n = i + 1; - - // This is a fast alternative to `let j = x % n`. 
- // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize; - - v.swap(i, j); - } - }); -} - -fn seq_unbounded() { - let (tx, rx) = flume::unbounded(); - - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } -} - -fn seq_bounded(cap: usize) { - let (tx, rx) = flume::bounded(cap); - - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } -} - -fn spsc_unbounded() { - let (tx, rx) = flume::unbounded(); - - crossbeam::scope(|scope| { - scope.spawn(move |_| { - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - }); - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn spsc_bounded(cap: usize) { - let (tx, rx) = flume::bounded(cap); - - crossbeam::scope(|scope| { - scope.spawn(move |_| { - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - }); - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn mpsc_unbounded() { - let (tx, rx) = flume::unbounded(); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - let tx = tx.clone(); - scope.spawn(move |_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn mpsc_bounded(cap: usize) { - let (tx, rx) = flume::bounded(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - let tx = tx.clone(); - scope.spawn(move |_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! 
run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust flume", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - // run!("bounded0_mpsc", mpsc_bounded(0)); - // run!("bounded0_spsc", spsc_bounded(0)); - - run!("bounded1_mpsc", mpsc_bounded(1)); - run!("bounded1_spsc", spsc_bounded(1)); - - run!("bounded_mpsc", mpsc_bounded(MESSAGES)); - run!("bounded_seq", seq_bounded(MESSAGES)); - run!("bounded_spsc", spsc_bounded(MESSAGES)); - - run!("unbounded_mpsc", mpsc_unbounded()); - run!("unbounded_seq", seq_unbounded()); - run!("unbounded_spsc", spsc_unbounded()); -} diff --git a/crossbeam-channel/benchmarks/futures-channel.rs b/crossbeam-channel/benchmarks/futures-channel.rs deleted file mode 100644 index 067c9b99a..000000000 --- a/crossbeam-channel/benchmarks/futures-channel.rs +++ /dev/null @@ -1,180 +0,0 @@ -use futures::channel::mpsc; -use futures::executor::{block_on, ThreadPool}; -use futures::{future, stream, SinkExt, StreamExt}; - -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -fn seq_unbounded() { - block_on(async { - let (tx, rx) = mpsc::unbounded(); - for i in 0..MESSAGES { - tx.unbounded_send(message::new(i)).unwrap(); - } - drop(tx); - - rx.for_each(|_| future::ready(())).await - }); -} - -fn seq_bounded(cap: usize) { - let (mut tx, rx) = mpsc::channel(cap); - block_on(async { - for i in 0..MESSAGES { - tx.try_send(message::new(i)).unwrap(); - } - drop(tx); - - rx.for_each(|_| future::ready(())).await - }); -} - -fn spsc_unbounded() { - let pool = ThreadPool::new().unwrap(); - block_on(async { - let (mut tx, rx) = mpsc::unbounded(); - - pool.spawn_ok(async move { - tx.send_all(&mut stream::iter((0..MESSAGES).map(message::new).map(Ok))) - .await - .unwrap() - }); - - rx.for_each(|_| future::ready(())).await - }); -} - -fn spsc_bounded(cap: usize) { - let pool = 
ThreadPool::new().unwrap(); - block_on(async { - let (mut tx, rx) = mpsc::channel(cap); - - pool.spawn_ok(async move { - tx.send_all(&mut stream::iter((0..MESSAGES).map(message::new).map(Ok))) - .await - .unwrap() - }); - - rx.for_each(|_| future::ready(())).await - }); -} - -fn mpsc_unbounded() { - let pool = ThreadPool::new().unwrap(); - block_on(async { - let (tx, rx) = mpsc::unbounded(); - - for _ in 0..THREADS { - let mut tx = tx.clone(); - pool.spawn_ok(async move { - tx.send_all(&mut stream::iter( - (0..MESSAGES / THREADS).map(message::new).map(Ok), - )) - .await - .unwrap() - }); - } - drop(tx); - - rx.for_each(|_| future::ready(())).await - }); -} - -fn mpsc_bounded(cap: usize) { - let pool = ThreadPool::new().unwrap(); - block_on(async { - let (tx, rx) = mpsc::channel(cap); - - for _ in 0..THREADS { - let mut tx = tx.clone(); - pool.spawn_ok(async move { - tx.send_all(&mut stream::iter( - (0..MESSAGES / THREADS).map(message::new).map(Ok), - )) - .await - .unwrap() - }); - } - drop(tx); - - rx.for_each(|_| future::ready(())).await - }); -} - -fn select_rx_unbounded() { - let pool = ThreadPool::new().unwrap(); - block_on(async { - let chans = (0..THREADS).map(|_| mpsc::unbounded()).collect::>(); - - for (tx, _) in &chans { - let tx = tx.clone(); - pool.spawn_ok(async move { - for i in 0..MESSAGES / THREADS { - tx.unbounded_send(message::new(i)).unwrap(); - } - }); - } - - stream::select_all(chans.into_iter().map(|(_, rx)| rx)) - .for_each(|_| future::ready(())) - .await - }); -} - -fn select_rx_bounded(cap: usize) { - let pool = ThreadPool::new().unwrap(); - block_on(async { - let chans = (0..THREADS).map(|_| mpsc::channel(cap)).collect::>(); - - for (tx, _) in &chans { - let mut tx = tx.clone(); - pool.spawn_ok(async move { - tx.send_all(&mut stream::iter( - (0..MESSAGES / THREADS).map(message::new).map(Ok), - )) - .await - .unwrap() - }); - } - - stream::select_all(chans.into_iter().map(|(_, rx)| rx)) - .for_each(|_| future::ready(())) - .await - }); -} - 
-fn main() { - macro_rules! run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust futures-channel", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("bounded0_mpsc", mpsc_bounded(0)); - run!("bounded0_select_rx", select_rx_bounded(0)); - run!("bounded0_spsc", spsc_bounded(0)); - - run!("bounded1_mpsc", mpsc_bounded(1)); - run!("bounded1_select_rx", select_rx_bounded(1)); - run!("bounded1_spsc", spsc_bounded(1)); - - run!("bounded_mpsc", mpsc_bounded(MESSAGES)); - run!("bounded_select_rx", select_rx_bounded(MESSAGES)); - run!("bounded_seq", seq_bounded(MESSAGES)); - run!("bounded_spsc", spsc_bounded(MESSAGES)); - - run!("unbounded_mpsc", mpsc_unbounded()); - run!("unbounded_select_rx", select_rx_unbounded()); - run!("unbounded_seq", seq_unbounded()); - run!("unbounded_spsc", spsc_unbounded()); -} diff --git a/crossbeam-channel/benchmarks/go.go b/crossbeam-channel/benchmarks/go.go deleted file mode 100644 index 2d65bcbe1..000000000 --- a/crossbeam-channel/benchmarks/go.go +++ /dev/null @@ -1,201 +0,0 @@ -package main - -import "fmt" -import "time" - -const MESSAGES = 5 * 1000 * 1000 -const THREADS = 4 - -type Message = uintptr - -func seq(cap int) { - var c = make(chan Message, cap) - - for i := 0; i < MESSAGES; i++ { - c <- Message(i) - } - - for i := 0; i < MESSAGES; i++ { - <-c - } -} - -func spsc(cap int) { - var c = make(chan Message, cap) - var done = make(chan bool) - - go func() { - for i := 0; i < MESSAGES; i++ { - c <- Message(i) - } - done <- true - }() - - for i := 0; i < MESSAGES; i++ { - <-c - } - - <-done -} - -func mpsc(cap int) { - var c = make(chan Message, cap) - var done = make(chan bool) - - for t := 0; t < THREADS; t++ { - go func() { - for i := 0; i < MESSAGES / THREADS; i++ { - c <- Message(i) - } - done <- true - }() - } - - for i := 0; i < MESSAGES; i++ { - <-c - } - - for t := 0; t < 
THREADS; t++ { - <-done - } -} - -func mpmc(cap int) { - var c = make(chan Message, cap) - var done = make(chan bool) - - for t := 0; t < THREADS; t++ { - go func() { - for i := 0; i < MESSAGES / THREADS; i++ { - c <- Message(i) - } - done <- true - }() - - } - - for t := 0; t < THREADS; t++ { - go func() { - for i := 0; i < MESSAGES / THREADS; i++ { - <-c - } - done <- true - }() - } - - for t := 0; t < THREADS; t++ { - <-done - <-done - } -} - -func select_rx(cap int) { - if THREADS != 4 { - panic("assumed there are 4 threads") - } - - var c0 = make(chan Message, cap) - var c1 = make(chan Message, cap) - var c2 = make(chan Message, cap) - var c3 = make(chan Message, cap) - var done = make(chan bool) - - var producer = func(c chan Message) { - for i := 0; i < MESSAGES / THREADS; i++ { - c <- Message(i) - } - done <- true - } - go producer(c0) - go producer(c1) - go producer(c2) - go producer(c3) - - for i := 0; i < MESSAGES; i++ { - select { - case <-c0: - case <-c1: - case <-c2: - case <-c3: - } - } - - for t := 0; t < THREADS; t++ { - <-done - } -} - -func select_both(cap int) { - if THREADS != 4 { - panic("assumed there are 4 threads") - } - - var c0 = make(chan Message, cap) - var c1 = make(chan Message, cap) - var c2 = make(chan Message, cap) - var c3 = make(chan Message, cap) - var done = make(chan bool) - - var producer = func(c0 chan Message, c1 chan Message, c2 chan Message, c3 chan Message) { - for i := 0; i < MESSAGES / THREADS; i++ { - select { - case c0 <- Message(i): - case c1 <- Message(i): - case c2 <- Message(i): - case c3 <- Message(i): - } - } - done <- true - } - go producer(c0,c1,c2,c3) - go producer(c0,c1,c2,c3) - go producer(c0,c1,c2,c3) - go producer(c0,c1,c2,c3) - - for t := 0; t < THREADS; t++ { - go func() { - for i := 0; i < MESSAGES / THREADS; i++ { - select { - case <-c0: - case <-c1: - case <-c2: - case <-c3: - } - } - done <- true - }() - } - - for t := 0; t < THREADS; t++ { - <-done - <-done - } -} - -func run(name string, f 
func(int), cap int) { - var now = time.Now() - f(cap) - var elapsed = time.Now().Sub(now) - fmt.Printf("%-25v %-15v %7.3f sec\n", name, "Go chan", float64(elapsed) / float64(time.Second)) -} - -func main() { - run("bounded0_mpmc", mpmc, 0) - run("bounded0_mpsc", mpsc, 0) - run("bounded0_select_both", select_both, 0) - run("bounded0_select_rx", select_rx, 0) - run("bounded0_spsc", spsc, 0) - - run("bounded1_mpmc", mpmc, 1) - run("bounded1_mpsc", mpsc, 1) - run("bounded1_select_both", select_both, 1) - run("bounded1_select_rx", select_rx, 1) - run("bounded1_spsc", spsc, 1) - - run("bounded_mpmc", mpmc, MESSAGES) - run("bounded_mpsc", mpsc, MESSAGES) - run("bounded_select_both", select_both, MESSAGES) - run("bounded_select_rx", select_rx, MESSAGES) - run("bounded_seq", seq, MESSAGES) - run("bounded_spsc", spsc, MESSAGES) -} diff --git a/crossbeam-channel/benchmarks/lockfree.rs b/crossbeam-channel/benchmarks/lockfree.rs deleted file mode 100644 index c2da2924f..000000000 --- a/crossbeam-channel/benchmarks/lockfree.rs +++ /dev/null @@ -1,108 +0,0 @@ -use lockfree::channel; - -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -use std::thread; - -fn seq() { - let (mut tx, mut rx) = channel::spsc::create(); - - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - - for _ in 0..MESSAGES { - while rx.recv().is_err() { - thread::yield_now(); - } - } -} - -fn spsc() { - let (mut tx, mut rx) = channel::spsc::create(); - - crossbeam::scope(|scope| { - scope.spawn(|_| { - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - }); - - for _ in 0..MESSAGES { - while rx.recv().is_err() { - thread::yield_now(); - } - } - }) - .unwrap(); -} - -fn mpsc() { - let (tx, mut rx) = channel::mpsc::create(); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..MESSAGES { - while rx.recv().is_err() { - 
thread::yield_now(); - } - } - }) - .unwrap(); -} - -fn mpmc() { - let (tx, rx) = channel::mpmc::create(); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..MESSAGES / THREADS { - while rx.recv().is_err() { - thread::yield_now(); - } - } - }); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust lockfree", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("unbounded_mpmc", mpmc()); - run!("unbounded_mpsc", mpsc()); - run!("unbounded_seq", seq()); - run!("unbounded_spsc", spsc()); -} diff --git a/crossbeam-channel/benchmarks/message.rs b/crossbeam-channel/benchmarks/message.rs deleted file mode 100644 index 48b2a4c98..000000000 --- a/crossbeam-channel/benchmarks/message.rs +++ /dev/null @@ -1,17 +0,0 @@ -use std::fmt; - -const LEN: usize = 1; - -#[derive(Clone, Copy)] -pub(crate) struct Message(#[allow(dead_code)] [usize; LEN]); - -impl fmt::Debug for Message { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Message") - } -} - -#[inline] -pub(crate) fn new(num: usize) -> Message { - Message([num; LEN]) -} diff --git a/crossbeam-channel/benchmarks/mpmc.rs b/crossbeam-channel/benchmarks/mpmc.rs deleted file mode 100644 index 30a41dc98..000000000 --- a/crossbeam-channel/benchmarks/mpmc.rs +++ /dev/null @@ -1,140 +0,0 @@ -use std::thread; - -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -fn seq(cap: usize) { - let q = mpmc::Queue::with_capacity(cap); - - for i in 0..MESSAGES { - loop { - if q.push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - - for _ in 0..MESSAGES { - q.pop().unwrap(); - } -} - -fn 
spsc(cap: usize) { - let q = mpmc::Queue::with_capacity(cap); - - crossbeam::scope(|scope| { - scope.spawn(|_| { - for i in 0..MESSAGES { - loop { - if q.push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - - for _ in 0..MESSAGES { - loop { - if q.pop().is_none() { - thread::yield_now(); - } else { - break; - } - } - } - }) - .unwrap(); -} - -fn mpsc(cap: usize) { - let q = mpmc::Queue::with_capacity(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - loop { - if q.push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - } - - for _ in 0..MESSAGES { - loop { - if q.pop().is_none() { - thread::yield_now(); - } else { - break; - } - } - } - }) - .unwrap(); -} - -fn mpmc(cap: usize) { - let q = mpmc::Queue::with_capacity(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..MESSAGES / THREADS { - loop { - if q.push(message::new(i)).is_ok() { - break; - } else { - thread::yield_now(); - } - } - } - }); - } - - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..MESSAGES / THREADS { - loop { - if q.pop().is_none() { - thread::yield_now(); - } else { - break; - } - } - } - }); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! 
run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust mpmc", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("bounded_mpmc", mpmc(MESSAGES)); - run!("bounded_mpsc", mpsc(MESSAGES)); - run!("bounded_seq", seq(MESSAGES)); - run!("bounded_spsc", spsc(MESSAGES)); -} diff --git a/crossbeam-channel/benchmarks/mpsc.rs b/crossbeam-channel/benchmarks/mpsc.rs deleted file mode 100644 index 9c69c257e..000000000 --- a/crossbeam-channel/benchmarks/mpsc.rs +++ /dev/null @@ -1,169 +0,0 @@ -use std::sync::mpsc; - -mod message; - -const MESSAGES: usize = 5_000_000; -const THREADS: usize = 4; - -pub fn shuffle(v: &mut [T]) { - use std::cell::Cell; - use std::num::Wrapping; - - let len = v.len(); - if len <= 1 { - return; - } - - thread_local! { - static RNG: Cell> = const { Cell::new(Wrapping(1)) }; - } - - RNG.with(|rng| { - for i in 1..len { - // This is the 32-bit variant of Xorshift. - // https://en.wikipedia.org/wiki/Xorshift - let mut x = rng.get(); - x ^= x << 13; - x ^= x >> 17; - x ^= x << 5; - rng.set(x); - - let x = x.0; - let n = i + 1; - - // This is a fast alternative to `let j = x % n`. 
- // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize; - - v.swap(i, j); - } - }); -} - -fn seq_async() { - let (tx, rx) = mpsc::channel(); - - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } -} - -fn seq_sync(cap: usize) { - let (tx, rx) = mpsc::sync_channel(cap); - - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } -} - -fn spsc_async() { - let (tx, rx) = mpsc::channel(); - - crossbeam::scope(|scope| { - scope.spawn(move |_| { - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - }); - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn spsc_sync(cap: usize) { - let (tx, rx) = mpsc::sync_channel(cap); - - crossbeam::scope(|scope| { - scope.spawn(move |_| { - for i in 0..MESSAGES { - tx.send(message::new(i)).unwrap(); - } - }); - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn mpsc_async() { - let (tx, rx) = mpsc::channel(); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - let tx = tx.clone(); - scope.spawn(move |_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn mpsc_sync(cap: usize) { - let (tx, rx) = mpsc::sync_channel(cap); - - crossbeam::scope(|scope| { - for _ in 0..THREADS { - let tx = tx.clone(); - scope.spawn(move |_| { - for i in 0..MESSAGES / THREADS { - tx.send(message::new(i)).unwrap(); - } - }); - } - - for _ in 0..MESSAGES { - rx.recv().unwrap(); - } - }) - .unwrap(); -} - -fn main() { - macro_rules! 
run { - ($name:expr, $f:expr) => { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust mpsc", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("bounded0_mpsc", mpsc_sync(0)); - run!("bounded0_spsc", spsc_sync(0)); - - run!("bounded1_mpsc", mpsc_sync(1)); - run!("bounded1_spsc", spsc_sync(1)); - - run!("bounded_mpsc", mpsc_sync(MESSAGES)); - run!("bounded_seq", seq_sync(MESSAGES)); - run!("bounded_spsc", spsc_sync(MESSAGES)); - - run!("unbounded_mpsc", mpsc_async()); - run!("unbounded_seq", seq_async()); - run!("unbounded_spsc", spsc_async()); -} diff --git a/crossbeam-channel/benchmarks/plot.png b/crossbeam-channel/benchmarks/plot.png deleted file mode 100644 index f5ea13ac4e11cbfd801d2f008b89cfc82ee89a08..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 119450 zcmdSBcTknvwmpbh6hTAd zjEo`$#hL-~&&Kum&V|=Mdho9!R;QG$E}QCE*=kwnQe4ooGBYx@GBVKKW20+fX<%w1 z#Cw=mh-c3&D=Rb0qkMeE|Md&JrWX2q6$jI6@gkIFXH+aHC}_0EKdatKzcHX#ML{7a zbNq^3=y0pO>JHtNm2vt=p4A)H>b@w9zoMG==X;8)_aCRH{rkne$}iH>?eCW# z#o6jp|NQP*3eJIl|DN;SHmiSr)py%5%8Y-0^$ca?f4%J5|H&_#zDh(y>rNiuH>ZUKNoMA zpX?v}@s93|d{|ejZF_HWl@$B_{cqFqGoqzd7A<2ggv=yC7_Nkw&Y zYM@3n&8$>bVW?4OQDo9QQhTvoXB+3>27k9LaxLq)8_M^IeV7v=+WS7@z;|a=Pg*A7stD(6-F*TreO%ShZ@En3$N^X!h(x#LD7^eExFduI}!n zq@=w^k7gXpT**>0E)NuFXlU3Ss#`|!pl0v-ZgI&ShKRT&CSgAaBw_SkY4(fXM-O`-(8py6BqC9?Jdl^ zzoxXZGN-TGYPdn*&&+SnhzM3{YHDngFIGD|G&FQ|63uR5w~5}e?&0BbIt7X278W1t zL^Pb24kHppn%}FY3z}w6?@Uimzt()%p!Q+#;oBc=N7Y&EduYG;al7Q^NW;jJe{QVEz=<4e`9VDo`k%mV9d(4G;N1kN^ zthK~c^-1zsM@Hl`ZGJbTnntLw?Aj$~XD3DxcGNl}X0PVEckif}#GZC^XfiP|y}kHy zb@rWUHcrlU_d-LNPJ8VxygS?aI3$Fzva*uAR@{x$%J*T5xiu?`{nA+rBbjU0t?TUR zk--aQ+K#3)&CIqta|sGQJNx25=fHrQj}Oh=KU3UO^y9Hhl8ckesrxPjkNr+d~eHkXw0%!8v#L7Yq z$-lR1&3=s>iFW4|N%9VolC4ZdRx`sY!6L@ru;+#6__7}&L?Z6a?7Cf_kR21~!zwNB zvVO~%3m1AD^&_Hj44A}io}BXB>5JGr{BWp{UL(q=@#7N~$>6y;JLHlgyoGX_*~|0` 
zR#w8iyu4Jqg*VdE)Bj9RD|>tC^%(^PeT2=mQLMVb^%NsH`^pmQmfgZ>F*?sor-$nBLKojDauz+=mEV}GZ;u>e(2^#QyEMc2<;xd|*%r&W zg#{t4uWQxs3~~tD~FJ^L!!?wDWmT9#WW zfoX7XusmEc#H2N&CQ(CLP*8Ag)OqC`KK~rj#KtXKJ{ww&w4~p>d9ym#*%_NvjF+*W zs*y$h3fNWl{%Y*pWR(_VV zBp+T`!4djWSonx5-0lI%zLnymnuy1rDjzggxoTCvmh~xPiT&N2x<*;ecg@)3}7PAIFa6mfKPe8LssSgV?9(t30a z|9hoGlbibbA-~=zM3_(%C8%YVE5}}oH~e&~=Ieo?xHz8I+JzoPIzEzm31#gO`HyKd z#v2~Gy6#q2S1&4k&Q)P8ZaX53F!WN-zO%z^JT@^WL)*bsi{bTYFF^{O(r29bxZOEs zoe%1AcDrhYO5JT5o0&=Z!&tV$Xl-F3=*=Q&nBe2(b(n%YL|?{NsepBxcQV{d(#l_N z+4^}=vS)NO+W7{_vkx9T*z{w5yqI-M-rVxSX#CIm1hrkI)*hE8K+@qH=`0Lj>1W&~u-EV%KNDtYD zS@!n!JU3FFh!{6*PdiH)m^;wtFw>|X|5FmVxu>s>Pu6nBCYOB)I;b(&v5S zM^R!ggox;3NqTyF)usb`&YeGh6CX+D^y#k7POXGQwM-Eo$(g`;74^hIPx{MmtE2B9 z&FL?;Cs=uGcWyBc(Ec8Id`&FhXQU9?ojXasv|Ai6PD)MX+Pim^w6rv;eEiV0M0Mu8 z_pdY6@87?FtxUJfe;qG1d$KYQ2M34d?c3*izrBc0$((I-i1&Z~{JGqP3%Wn!RFFzy zt7pGOZ07{ZI&k&<_PMTQ{g6j;^78v6C6|J^meXpNdz+K>Y1mIc(M0X-le?7j>6375 z@1`9Gw!38_jW6PX>zCR#(6FqHv>j~=@RB}ss3Okx@VxVeN6f_lQj^_2(iVN?yF$al z65nMzB71k0az$hf*$4;-oDVztVrgO4a(U6-@@}EWj?G3lJ{#7p{xy=`&yuq^an5r7 z$Ggi+|zh)-@K)ZF3F8J$6 zuY~PLXdR`G+ji;cF9D{>{6j8$3U|$F(t@OvM|jRwM;G8#dJ*Ybf2IbRBvn=e zy2gQ(BQ@Wmd9!o-6^l6O+EqOlHG(4U481F}u|XlpNf7b;S~tUY`0xuP62LvvocVn( z4k{JZ)cCeLEqK*yIA`Z2_SaixC8QWTHD8W7&u!+cEJaTraH2jy7>A1;P-5nHvyk1G zW>!{Kd8sNj^HHml#x^4@K@}mQHw0Zr@&jT`4Uo`S53*wDW+{#fw|Vay1@bzI-{pDKki8*5$#2lCrYy z>X#?4<*kv2+@cAPWw{p8F!uO!2>v+Sx*MO0sK`PO+fjkGEw%GlOy;AKa21 ztCGA=OG~RfLOKGmIEG~(Mj+gsZXFkZ%Nail}0Zk@VEiuapO!JG65%j*brU#kJ;& z&?A$s-KC}r*|TlQ*QJe{gkyEZ#l^!NC**eWsjCjqPe=P)HK=>P1Mi)Br{~JGYd>`# z_o{vS`O_a;o{Y{W#iaFm#gqdhBMoeZ*prbCYE)eq$?jJ(F80D!ItD6Hk1u9B@$ z^-4_Sd-?KZK|z6X*q)KL><~m*Es`{&$0QEW&Hl)v10K}xu$R3WuEb`lRCH!{E7_nGY7u@{IWiRmwe?PT4Qs-qhmfsFbR$GGG} zI{-rZJHEJ3F(2LTS-ARGh={RWkFP9)l%p8X3Qq51eCi3{w8?J=6O~&-X3B-C_<_{I z?fZ5i42uCW#mqYLwiMIv-hCD&IPi$&J_7>-VZ$G8hj0Jf{)eYV?)knXl;)A#6{kPb zzhu$aidheBCzvc&DX|alKr!{J-e;GPUNsI$igB|mmW_Z>6qxjwz;=M)ZJv>jA2!T(w 
zW%8kh%-Qw-A_bMZW7wi{*3xIspC3W#L_fWzqw_Iq%IKY|_;BrOJQwL3pI~Kn0YQEoM?zBl&aOW3Yj&uv4BbptCwjvx@2~uQ zo%ulD#ihA^9qCODKZ?6 zmhQ{Evm3*KqZyIE`6VPJw`|z30W3)gnv>)ty^7tIA-C@qcxo{eA6|78{}op z%*^<|e*OAn$H8mc#YEEw;9{YxYl>^Tl8P40MX(RBshJaY!xW2iZ&X z&QFk98BqH9Ei5b&Vz&ycmdsT)92Xg*sCDxQt3cEiPuPixYOno5by;k^bZG{W4j+e zwB0fN<&^{Iy|rm#4rADgKpej4a{+zn{ow~ko}xP&*)-(B8Ib@oa3h8E(i9!qpq{bL zeB{WE?(WMid)?jKT6Tcqk!p4MFBWlN(|_Uvt2xX$�@jU|ovq>UJ8FRBPsxl>*`8R5U7Nku+}e5_@3NK(kH5C}X}qS=ojXTC>GaK}x3K(J@o;Nt8;M$0 zqP#=-OggSoJ859;ahV>G7_Hh=`srM(f~)w;mF(;d1OGCfEqnh5ke_}BMku1TYGfP_ z>Y_f;BU0D8e`KRfk4^Nll1xwEpz3hUf7*pjI~E1cFYX@lS=%qQbzCI$+e7-kdk)fX zk8TuUO-nO=Zq5%h6fA7;AU5_Ojl>wm!pv`><~#jiwi~Ia3hL@!;1KIog&X@Ld_5LM zb6FA2j(6_{A)Cav)onWt9=OH)fr|^i!AA07V`C7wJ%ZiQgWk)`6h}_dMbJ|S8`R1$ zBXrzKcJM6j^3Pgbte@w5x!O!vIF^!2+I7Th&(QZ3mb+7znm*qy zMiYSYMM2OpKmr11DozomZSdfBAMbHfqO!lO8#cI(j|Z;BOT;mTekS6OPBgmhRzU{n|E|xZ)*poiEiXo<}s`L)YD%ocV-Ab zY?a;H|FzH<{MWZNG!pl+vn6$kp6mr_18BGlh=eL%6GQ=_Nx@(m)+cPq1BM!C`g{;f zDhEzfQALFp(%jB&7J62Y=oYefmgoB^H*UvI9T!@wS3&W`=F zL@(?zmqy!lnKtn6ZGS zq+}Hpw^yp@l$MsJ?VcKS30i@zq%{Xo0Z!?3etlF>RD?p6A7Yv#H$Ty9 z(C}d|nB)y;;JZ*!8k(ECySh%iQ>2gh(+VaA%x)e^Q@UB_YCw*~#mTC|EUv%s5RpSHhSm4nVFGR&-nO5cqZ;@-zQ8* zo`D<0r~6vQmm9Z06pkM6ZPKlG?8~43Ec?>?C2%EI*V);bRM5yUP-<%9+Q=+xflNf{ zhM4~ebx_3d{>4vr>3k-sTZ^IZ<5AFFw6xrm!Ug}kyt1}>$JDd8mb;aC3Yi}v;9E~E zCAyVdP-)LjV^3S+K2GnO*#wy7DNbIRmiE}m79zGR??8b;lRoU9tzlxqi#A9P<>S;( zz0;@H1K;iA|z>U=uS?>ZR{P5AE z@%jFU%V;Lp420O(U8<`u0;Yl|Q}zACjkJ17>6i1$;DM#_coEwX)%W0D>gXdR?4N;9 zdyZbqm5xjPVv3Q9Vcq*e^uf1UGp4ULTUlF2DP(KFCDL`c?b(?4eLlP;jN% zWzi_+%_?1qjsdV$v>t4QmtyqUq+2y?Xn^jAg)w`3lfyEG-Yle9_vX#9;pD2WU%xzY z0FgPs+7Ca${PXIn^(6*^&KlzRLGY<_22Iw!*yFdEz;|i3BN8kfg zq)gi+a3GF>Xu(P$AZvMIFTbHCS`z5=C%X@OpiEoLjcFxIVAHZ4W-Z1#j*FPIlz=)i zYji&Za_>Hv{e*;sz6?S^gHwB^7@goK+l`1mkA(!6t_O+=v`qxGiR;FfU!UFrZOWWk z7J+Q?@c;OHN;vAsI)t8>o1X{GcTQP3u&#xh(B;4u1!%NcB=3AMZWc+70|{VfZ?C1V zFN>FccP)N*h^VPZ*_M3GmNav*qem69?^pxg-@Lk_2tKc9!bi3sIw}gqU7wd#!fq?- z1>ljHl@$l*b5vstik55fSsis?;59Gg}ZrT4VAVB};XJsC6b$?=fP(l8#<u< 
zDk8Y{`}g>tI6dCzR71*8i#Wk40Kh#93-i)qK-IvuoHR50kgkT5Q&L%(;sNqK3|*dj z=Pft4GnXztaAK^bL_nVz-@IuPo49!MkjTf|pV8M4{u=btk-L9{v3Eb>ROs^gP4@fa zq=5}Pt)~~0K9W?-LfA<3yXqwEjn z+NG5npKj5Do%vc?N{Kin-|fkhD9~Soqk$l#lcenC;eqtn|K57?E*NV-=8lez zdx*?6>#5S-DO;b|!^Wn#NGd~p0yCl&-53_Q7_olDexm2$^O^JKqi&>{kiHBbKgnt7&QYrY&3*B=f5r*a z^C^G0$9c}5AK`iX{Q|n3S`hQ$cV|>=em4~&51j-VZE`@J9DUH{;IV)SFD9x>qMvm4 z@F1l|DZc)r?zy8j!)Jg^z_II~8Vj4W%*ALAw&zNNDHjMR^FM_D-58smjz;62mXV>D zZlMcWy3Aw8K>+s+nxLh?e~@Ex;>3x!iqV@!vnIV8E3vWePo5MsHqOU%;bCHv1x%)D z2bp_P+nu^WBd|+Jg-1s2iTzkoa>k!eqY`zhT&taQl`b1-h>XN6ZljvQdk<$7M5!?P zxlBW%P#{o!XMP{$spJi4=g(q6LCjkI^NU9#C-lh^DzM^}U4@?SAf=%YfSUKhhOPNv z)F`NK-@5~K2b;xy;D86!ssGf|z<=Kd@AYG5Q-b0X} zZBmB#f#Z0pmXs{;MkM`Yxv`jjAyXMup+{KB_hpvB$B#rTKgD=k8 zwq3hQiP8nYSMl3+q($!5t^FW3P)#~NS}TN#-AB&`5J0%ZbVH{4=aB&%NB3@}O&^dy zdOR(pg&TmPq;&}Iakzj$Djg6lgh2W6_L2*v#R!Md?E0Y^9Z|*E+T!rmQmBPlIRr?;}QR)I;|#jgoTGDRtFGrwLr!>d=X#wI54nhy{l z;28?AS9bIFtRCQm!B<7Da9>Fy-0bzI9xGE z_MeQKe}?k^Hnc|8sQ_RgG!V8+VtK9`)a-{eo2#f?K*Q`ujs(fF-UZck0^S4<@lZB8 zBrGp4uR57(PIs&fz~=rD{+6e{`-rV}UdkScD&2zq@$?^-T*+Q%lasqV z*u4vxz1_CIj+k>_yUoty&4=u7pU>-j`hpy3A{TC!jIbuS0fj?^HA(py)b9*QHoUEvaBc11PPKLVUi0W#s! 
z?RsiHK0XvvJ5chC>6UL)Okq&KM!EtObwH`d8{&C|%h=i32kR4A)iP}gfr=3N0qD#^ zdi$#)^l=`pB&hB}ZD~p|E`uu|ar}9D=KkZk9s>gd&Dssza^o&N z`aS6Sk5^LDyDH7TZp<7`se4!XK`Wm~Bc?py={De%nm8oAD`VyxD_47G|>SfT&tLZK~6=@QNIqoDgVEgPTpCG8rH^2vsK#WSyW%ix?r-p#GLmAWd%D zvE!7X;Q{a$9D;(iJdEH=!EtFL9it*zK(MR^$i6M5XG`bxW-BfC@tmG>Gs&#?+UK@# zg^m>lm{oBoRNJ%sdqSb-bFesZ{7QgkFI_CN-N82c7I1)2hH*x9|<1iTM7<~<9t@E z@)pln=Y3x@ar#I+po}@V^6Mdu*}dsFE>E8Km%LIw6;g|TEY+htC-#-rqO_3F^m@%E zR^D*8I2~qpjxJZm+(~h2vFgyiA5r@#&Z!utSUz=0>-yF$A%E(?YIlC~wc7J=1PuTV zB^F6Fr4@+qVuqCgx-L`o&nx|}!@&?g0>TCm{bbbm4CR7$*DiN}a%7o48#rKk!QFR* z$C+Eu7#>A!IoDrFFFP|m-1r=@-Mk}j)kx;36a`wWQ#d^ob|_K1j#zMTDTEi~=eq#$ zd^GsJ4=lqf22m3xfIR~A9j!jAMxm{bT03}N-~Z!wStj>A53J+5O_zDCnuo6R?`7#S zT~0aMkyuD=Qyn%k*Il)a_k&+_gLc`Zgb%ZgW4@?kv8`3)8u5nX+vKte&WUxE$jK`P z*FH)KIZ15^3Bs_(*Bif0>#b~zrIevPCDK@~CxTVhdZ?v&(et`Q>GKB7e8X##J2V9D zt!gkEmtpr7J6e5zBime9`@E~^+p|}h%qTQPGY3zLhWE%V`>g2fDq|=)M)C85tIW2+ zldPd;+FNcb7qI=Q?$7WJLq}5qibZ&ZnUxh3Bn#H~Q&SN{43vcsv;ofo1J7K!vR&4j z=_S^eh!%Sf9`ppz4s0H6%?tnqdq63{=y23>1aJ1Ak>O#Z${|vm)S(>%p@@Tlc@+P< z@RB1pH@DscCj&J&2_S^TB}A?y8{b1UfuJu2t{FV{hpR@=cF$IChEWR4*h?Ju)jK|X zab>b-D3-eZeK%L=hvINqyN9jA`+OuO-&<|sW?=Ap&190cqiJLnwWjBx3kDvNGk&l9 z?AM8Fh;@o}+*`N#>e~}+?i6BOXD%3O`$$G}U!V&%d-nY1SZ-xIy@sJ8-3_C6Cz&lu zdpWo&Cf;9(X4{ce=+FQ447(T4M}Gdf7a9h~H>;XQZ;!U~lRfg4w)V^dz4r%IAEIK# zUOR;6fY%`kX#;(jD=--RXjKhKIwXfeF?joSKcP~ui?*ZVJPXkU&BNTEKf(E@Pst<{ zu{cy(wy9R`eQflxqIZ#FMiFKh=$k=h;6m1=q<>QX>j_FBq!Bu|N$Zi8Bk+@PL$*bc z)j`rBpcoySCnSn(3=FrZ@REqSJ@n(9QFDqH5X3VyxS(tEwtSve zfDDAr#1#M__O67(3=dS6jKP^vVR(<&Ea)?YzsPg$SmR=FKDnuAJ)>Z0l*s4OJuW-q z_IL@8^r{Dc51kC{Un9kzI;!BTC|9_KX41n}OVvzZMJ>2Cw3~^d|45b3V##r83;$$2 zMwOdE`PF>!w_UbG@7J~dI8~Lh>SJk9UWY_~P(D+v^6&?CuTN#m9eY-C3JPSvsT4uB zx9@W2Dk&-HSXmxjDS!SI%23C^KnZvXcORer!8Z}4*G0k5_&kDiTM~KPB~aLaP#;mz z(Mbm1W4c<@Xw$vsy2=z5Wduvpx+{18RGv0%YimQRuUt`}4p#|Om=i0@OK`sF!8xSl zNP}jMY#k_uKnZ(6l*Vq9Pdy-Z3GmtQN$sSep)vd!=boG_gmye2Y&zTe*H6NE6HxZ@ z?_&ta2Vd~)!n(WqOmWCN1Il9oG$)Wp_%Iq z<9=`PH)4_eu`!HEt{F=yT4t{pw!p*S9PDr9`i3h9vh?~%K= 
zw;nt~KN2+}P#wXfT?pV+gUx~qM$lKzE6Yf$Yq_|%NS6uZx!IVASy&q(vyNky3`^`j z5*8OI|6rpBK$l`8{m@3Iw%zeW${1^+`FZtxsF*Lv)X9O@J`*VR-QY8Uj9x_2gR^lv zLuvC_V^qy-ua_{27qxhDrPNH#z4fXct3%QzcAQjFeRukBPPaHqw?L@*J{Kx)3qRy= zh2;WDuB@?F!qMEP-j!AIYrJ?Lcasx9%|+r?o6B;Q*2tsMeIbf|Un#jz8MoX{=su$s zbSUM=YnFp5rQH(Vj3zgI3nLZ{Qh}CoI2Fn*6j)GxP7*owPV%J zyZIIR9q#KQu1*R21;}BvwA9&u(!3xkEgyYQ>=Thqtb%ru6S>9TYimC!PtBz;`oiu2 zV+5Fdf)(|#s!Wo1wj&@Zq?YG&)E#~wuC1-zbiUfkp!K-R`b%(E-kBQsl4jnWFc%If zsOR+dQ@MLjb))CAUPq``j|)adJZQsSqN@a9MGKVxL>u9V@$z`t$54(1wF@?)=h!8A zM;K&D-p`*i9@d>*Ps>p=U=%?ExCh_|WZ-z0C*RK9WsC-J}9-{bv1n020jMtT;lr#%ol(p(JKkG|}uO7AyUgV!Xk!_3i~ zGGb>=x9A-}#S$2a2M%~)fHud1YfAx0TlJ9F-b?({4ponsbTp)EcHA7`tmiqWJH;)H~(SQni}w|k06VPK9^(%I3KSi zUqiB!Ia_^>O*Hj(v%Cu{KCuk%FW_k+xdvXmC=(S`oBE>V!@$5T^k1IhPZ$-S>1eMN_2JPj~_pdM?6KGLf@o@iy-1vqAc%ply88Qz z4<=@vu(6rhF-rsulS9zOCfnf=;rK4SRKi5tC9n6HghpZF?*g%Y06j)JW<$xLR zsh?i~VAg&Bwhh!wB$bkM1;7g$#-KX#{`@jXax&2Ytvy8B>|BVk!#x4&(>Z^hy`Y7OG6K*UG~SaL{!(}46POC3Pr@;BMqXYI$VOtK zgk5hQ;C=t)7%Eg7qLxvyNCcjIOuY*Bz_KX0=P13}kadV#8vy3|=M}`6fc?Zx`3R{C zaM0V07t+Un>#qnYez0ycUbP660%B@4y{s_ko1Lg21Y=@98qzJf2(LIpqfVCEBRM(Q zUSRbe0fBMX=1tHTv9_SRpTnJ-gS_)NH`n>n!-H(Llvw7Zs3;E5S>K>mf;G*@Z+`f| zkF&HP4?d$RbZ(wV^+b~Zc7YR_T2@vT>ShH{8Z64|BdrE&(LQfO*D_ceZSdnQEt-dh z#zrlM^$s(`o#18d92_oZ+87J1hanm)xeJOK5tb$IOlcw!Q866Qx|-Y8k>X?BVQH^YFnHPJkae+8Bg1Xx@F2<>cyV%YfVT%tr1`Qp zAVK=h)P5%?C+MdkzhDwAfWKr_Af(&nKZBFMTcw7n)orHRV0~SV11}0t!@N%_uo2Q$q*FN`5uXXFz`G$p6ko>uEtwHugVJ8{xKMC(E3u^r^^Jh)}ywTIs1Fu9GJZu#*=>YtQ+(#f2 za8%*#j#JML#+(9_unH(}Xs8Rme7TRK$HvYk63f`*TlVLkbornLvh17!HTw*M54WY>K9pXAbfg7KhA-yQLyX#92RFm~C z@T#T}^$XE=b8&9GPsRu+UJFBT#Ipztr3mC0qYxpFvjf74q#uceVR*K8g3nSwN8~Zp z@FV_40N5}gr$swJfwv&%f%d;2uDc5#j50nU5Fccz@t)@jJ5^NS(~nb5dH|a15~NQQ zaiWiJ(fAGgOH1o3?&39GH7>4b^bZDR-||8A+`79~Y;2q!^OE8Iu7wsC+ItjN4_MOu zwLGX)ZD6)o+-;6eg< z*XC8kcN|^XgI7p>N*r6TQ>@D8FR7?FG{O&ig9tIg;NJ)S9{`mgV7Z{*Oj3@4NU)uK zeXi(%z-a6{cyM#t+*;qfykl_0Qv&SF3^!gApkfrE0;`PfGw6tA%+*GE4aei~=9t^s zz5(TD)RxuA1KF4btO8N$@yDmVnGB|%n1YtlD-S%5)MRF36N9jT1RTt(mZ6!Xym9kp 
z9TZh1390}4NJd4HH|75w91|r5!v;1s-Qz!_le<6ve}_&MV5VtIGiS%Vz)g@U4*y+uFhxKPZAjr>LSJmg)Dl`bac77 z;_Ra7$_L=z2}iNIQr@(PGV%q?3c7bn4fbn}?|g zAtXY z*RNj_O9%VGgJtiHFpoy8p{M}m)=EGn9Xj9| z9+~7?4Jy6{oanQ#vr8MtZiJLEikb+c%$E0(r+`&fH~=6|AF>C`w)td6jS5^ByQpYA zk4CyhH1X3Gsy@JvH3;-Kaalcp1$k@xN^aLI5%ZjybFWK}rh<0i_e2 z!G>^$kt0}w)0q21$XuI^)bK$^if&X6{VVXUfRBQzYRi_-^gDJug4$2~9B33kK(v|} zt!&@A6(1*{&Rp)?xr0wT(0_CMGgqIG2b71EpoKuCt<8p=0eRPID46Vs=_c|sIHU*~ zB2(9>rNlK)q2V;o>AW;e@2Ul<&l?CGhLtW1sAz(2_;*wr@wNZzkh4O*B#3=79Sn6D zW<5b5JTND#jv<*6GaxCq{8S%5epJrBa~QG0iAiRdJnBc?C_y>3K0R|R!l=gWh2 z#Lx~=k<6A=Nv}9wu>KBe$J)A3JISo4gqB$4ZkpGj7X|Zp`U%5v07h3WL}wl-7mb^v zd)X9!{9~+DJ$3#(75LX5u}U<+FThq}4l_pgf5X&R0W(HJiZLU>%f`+A;CgD{Goe7z zIE{%u0MpL<8(1RYW+y)-)5YFQ?g(Gd>131^bVYJ2C`^mauj?Qg4K^gPqwx{t(!TvY zW((A%SD-Nb4&Cs_n+V?hV+Gj*ihr*QjZ_CvIv#@`2_yp{5>*@n6wjlc6$s1;E8=mC zs?LMTItQ zV@o&q*YV+xqOZ+8Vum_=bkJdO=BAU$vHJ=a-pBzHv_a$^`j6h}f9_IP{q$PNbK+Dq z+51279)?05F|j}(=hlz2Wfc|C2zgYH%#x<^l9EVxSkbO;SAjXiHbk+I`5wT_8TSw4 zqGmSpwoTNDv!kCJT;SC?hX8-4k_S76Xq*=b9iMQwSTI2*a0TzNo@@l#V);2i7*qXXukf+nQ5!Xt_V z!i4j&?DYYLbGv`etjT@+CV4NkSb&4jMJ#auk{98YLc6&Yu=s0!e*8CB3s2xT05pm9 z1pVhX9&()odLS~Mj8lRkhQB4aTg+)utU1+`OG2VWRTNwz>P6wJQ=XW{UJrx`V;vc% zE@pK$G70eY{es&ei1#ojCr9SgDRYeaB}<5jF#vJFefTCsN*#~NzQvr+-NeRCjgs*+ z7ZBxE4D7@7dn3i@3(z(Cqb^*4;9nIY>XVqeyc?7d+*o zLyfSOMTFn?bf3hMkmmO>4QmWWZi&+b^Z47{kfWKAKD#kslCS`yd?10t{4ar zgnI{@jmt3c-49*Ln~tV-=`SsJ7UJ^$%}nl+#f(FqaHHG+i9R_LW`G5pI}z)vNQ z{1G>#F$4!1@ItILv{2N^pfUiX0g;2)5LJA@yOL3ML1q%|7#a9SX(Ns;07S=y->LOK`lA0ni=P$xAas`? z^z0gq(Jz-f!!iEc*H?=-7qLk!dqC7MrJ})0`QzK9opceowCrc@6?DvU3JR&$;nh6? 
zmdk`!DgOEkD3O?5ltV*^>>3(+US6IW9ATdgJaWX7cz{IlH~&Kpk6vR+D&X~6wPovI zH&{MLY(Pz7uIWarl2be}lEVBIC~nJ(=Rs>qT+-SMMOzBD?cV(ja%M?+xp2MYOoI-O z%EyN&^kqK}LQw_113(b+3kVK3T71||NhnPKNz?rz7)EJj_Kieujv4uU)JYFHxS2k)8=CJUgxO}r;1Bnhr}*j%u_GNaG)ZA zwMsT^m$I<51eQ$NecBXU5s0&0FmVuf3335ijwj&Sv~N3%`w8MS`}u8WahmI7^j47R zF|m>57=Y*O%pA-xXyHK0C;*Z; zh8zTMAs4KAa2%_DR>xbDszhv^m`uj2S3|=H@rXu4jy}CumG?#pY~My->gxl@eH=qW zX93!SuuH^ugfP7afKA85bRJ1-BPHcac!EO5b{UILYk#%)i2Ik&w z8R%Y0|lLuF-M4cf8WDgI|lS4OB zF6KDSvl(2L>ABt_w`A&`@gRqIxa;1-a7_n@0Pd`usTOj(5%PgIrk6>gfWutL=r*DW zX{e*C>jBUNw&hcB9tRQuB+bh}@|Ly!igaYUZp#8E-o>``e-M_7>8v4wV);Hv$ z_O_{ibobK{Qg?B=3M+AMf+S{e{Q@jYVh|8UAN>ijM}rc>0lIwa2Cy-?PXp%uB60ORr!w&H92ZV}OYU5VRLOZgF!n<2_);b@w2EJr4{ldB%Az!GIMxI3H;K z@00IhF4wDhIG2(?va=#tS;%OXERJ3Lf-_bph|`wNu^!Q(nFPqMZ-Ef~XZNAj2Flq8?*-wa7A zp8DAz9Ok&o!G2N?BhsayhvzY4r2=~{1jd_fS@wN5wdPUb-hza>FV~8qPLu_}7MuIC zSj%2O@+g3+Mfg$smihuReC4{}7$o-&pp2~h7Jg@lJ7=zIGfH_eK02ScgT3zs#n)azmhF>>gR65fUVJEeohbMpmPkT)JUi^#kJqcpi! zmamQ-Y-PV2PqY`rBp7~4Lfyjspgr7>B-%Rio5Ptc_?4eD3BWU#f(ArrWtpHxY9DT5 zr~n*#g4PEoK`Bf~A7O+)d0oAEJ7|NYv*#bMfb2v~yM=)$5VT}Y3AGY9ewVln51BE6 zG(h}nFiT-X{2S6Lrg$&jPx&)DS_M>p0;AEy$q7BIps)}^j0Qi_1`vY%(sVsA+eUH& zhDl2x)(e!^NgPjH@dKy6J3R0AF~kYGj2oJD>(TGv?(JdEum>Wr2i*vnl$u;60FVHv zI5x4vnkE#xzx_FmAKkWX6N&P-9NwYHQnj&q2^VurL5qt?l5zu7DMVuzT2L*ptK=F7 z%+(NoBj)iiByWiz+u7OK2|<$ns01^w{P`5P=1?!mJQ#{i*9#?e5ZA54MrfEyF$nVw z(2`J_Y8h6Yn82X$J8;zlHU%GiGzPHyV54nY7jzib!_rGG`XHvwYB(ptotN!NAjPUw z5LNH&13*OqObzK(hN7@;Mo$Noqs@>JT^$B(s({M>_0013FRsE}WR(9O+GYDV^5@e66;(9g11xsixNYcmjN0Sb5AnVugUOvg--Gk;64Fh*W4TPvNaVN-=hKnv$9Qz?O-PyPJavmTB0l+wg8Zh_yJ=#Bx#wrF@j zg$08G2%$eiE>nv&Mil&C^tvvrq3=I31TBgJXd(){1CyB9J)nJ>d*B#}ErKH4>5l<9 zjldY=V`G{qFGfwDDA9^h;Nc#_<_2AbOcm_&dF$4~?H8%>`k>l4F-6;B@Z3a8ctRn# z8nAu>kZ!x(@H;;qFo1ozkl=u?51RdYg_a4=Lck0M=Dq>u$oMq)p7|>0Wz5v=MEAv? 
z+K$~pck4uitYOZ>iju|PuyU{ z>g?%x2ss^h^mHSFC`d<4$diGAGsssMN9fzp+?4Ggiad$gQ=)ES$OS#2K-uKP1aSyn zeSZZw^*+=CNH;W^`wS3HP^9i0juKZSSkB{PM7?v|MU}S8>5c^Em1dA{t{=d&q-P2+q zcd~#b6u|T1z!g8Fq$A)DQpBT_7qAY!_K`XZybS+QFY@0W+~&ZvlWMGp(Uc$ef{+MA zOFxfcTp+3S9RV6nt29w2A?uR^e!J;0WSTM5+7E_x2d>2_e}PE|LXQaRn?GMRuoAGM zt*vd+=#y^yfK+4ZlJeK53He}-a-t??cVkJqP5+1PM9=?KODvY0)%y8}ZO_o1hg z=~gnNjv@vfR1YRD@&mL^wIF`iQ!|xd1ypa{5Yj6rtp+9CzKikK55vDShdWBhU4XEc z5Vsg$gb)lIcVpbQxG!i6KoOJsD+s{F z%`WpDE;Mnj*UIW70r1Hkc;s#txBxDK-$GTYm_W_dN>V1bYS^#L0ZETfPjf<0#hA{I zp`4L2P&h@>duNeaHO`3GdoI2V<_ExHo*H{_F>-)q>OjKh4K8?I&eSoJCGw zdeZF6<{KXFjaNUp>%Cl8LjElsVIYXK%uFH3uqQ?Z@b>yZqnQ7Xs-6IERE;V`CfYi> zc1*!|B2;4xjDS0M`mW9ZS%E5nw(t@*-n_!t4%EX?Ne6#~mLRwpL|q!LWlXgDtVfJB ztWu%y&F)Oo=XL%_<|8j*dnm93kJ05JuAJ_tprME1lLvLM5C>R5rwCe(G`Z4m4gKT6 zBS(*ZL@1L<1Ttk~JHn*lIKGYocXaNp&&%GO-+3eUnF<7>j2@A*?+wB>gO34N_Phj zmysvgK{6?ff%y=%f&L?;R~D12i=*30ILxt-gkeTEv15M-M%pos!36jwko#*{ZfSG{ zrb-1z_(MN7e_lECWS=`-`sv{6KDGJDBcVM4kj zngcFA*(8zrnLpL=L+R)E$ymBsAoX0@=|dOG=CwsvdapR)@pPQVGL?H8_bcg zwua-dD5-E~$b6nt{EwLzokrB!YRLhD?&xS~=UH*NTbch|sA1$f z*;M}BBshHjUP#SdLq^I(4E4LiEJtj7TQO39Tn$9Nbt5YyW_u={U!uI$Liq#cYoWV!^Kb^XO%#Lxy$~$i=_T40i7ZDZsxjCB3PRT(#$-|l;X5N1Ry3xl z!?>QoylX9-e|h!w^yo23-UlO?n8x>aH!SpF4tk79YYr?DPSaB$7q}8}4p#HF=`+*O zorKB?D91;-XC%x{5N|{vAX5j(Fl+dc(cWKqcLEnyqSg_26F?dnFM|~M1&&7G`#rej z7$-AuG}mF)1P#&|TtrEV2e?k0ABr&;3dWmmC(<%ASKxHmb$f~4`X1vIG;rt69blnt zpiI#*mE_?)(1z ze%JT9KGOv=aD`K6hLP8U#2mOy+XV|wC(VXDt4?0!F2cE*H9i0IVfjGg7l41&qkf6N z!FI%m?XLGgAnL8VJQ<>VhnOMO(-f1o+62hLgyQ<6t7IKt!AM`%yk&Jh7!&P|EPsg! 
z+mS_tq{-O@uRM*G1CgvooEkZ+D5H$Ns__^zxHr^b8J^?mhb7Ti=j=Cdu?=)XJF>W| zu5dogu71rh!`;G2Qe4j%nLTb#Pp>`lAV6&wat3!p$F&|?`pKm{b+(iO^dal%N9Cal z`_)*pWAE#4N>80Cs6X&XfJIM0!2I3i=0`fcShyKy9nmhdMx9T6&3!Mu!_Auwu6$Omzke&tYz;UsqVCtPXJ4RdP1;^X<&hSVW(>nH#h{+{XeVH zlu|c{TGkdA4_GXI$u48}8DCO1o#PB@js*@lrJDH>j!Q(3*MI80l@CZl4|u7UxG+i} z(A~klNsL?DXMRaG%;Cum%TIp>yg(<@Kq9umLx|nTTe=&!9_T4|F!yVVMFf!cTJW2w zf1or+{~EB2I`P7aCq6tpoZ~5@p9M!}&)aUu1)&E>a|?szqUJ43KmLaXb3}$gmoA4b zSAH)i3(X8(EY$c92`dkPRx6g0Uh@?xNR!noVB{cQ1sMaM2;k`S##2dO!z+ozlKrFj zm7ho`0(KINd|VbsAWsERdHuOolNhzh3TM!k*hEx0G$+JP_l8uIFnBYw)2ZK{_lyS_ z5C#fivWONixqPCS9gUsMTtkZuQ_0kxhK4=3^AXM@Euixt%{Ex`QoGi}rTd9iZryL@ z9FQE5PlrnZ{jX71Hf+&i*6FeTC+RCg&JDn#%&c&^#TE6A*Nz{ZkkV4N1=Js4^V*qV zWW%k{UE9A8BOd-mzMWo9(bnv^Kq4!eHp-g6e5*sZq{zxMHOkKcr%Cp;r_ql5fp#1H z(7K3H`}XbI)B~F%Ie!slTur&!wH}aawdOKOy;`3+`?1vwPA{LVm;(p8lPHKPiRvkS z3=9##eb>QOB@_!G-6Z7XK*{1i6Im&l{ep$&3%EvCN)CWJqOYbsXAj|ynNw;O|Gz_1 z*+VG~u!f;E0^o|V^A-=JmIc>Lqro@Jo zERusvyfD1N%rIj|ChtSfZZpHx_DU{QWzb1Q#wpW3$Bsx5c5o)&B4JJXkOmFT?6mpF z$AFqzg`omKW)aIH&~9BQxIl+i=XXVg)1QPSY0dF&vJj~dHQ)ivkAzc*W*c4x!{v1?$8mA{3QmNliKV4_=UG$)*YfV&WE% z5so+l;ZbpK)Br!b{$Jz-dHTqCT!V>WE2$~pHfws zuUOA9qC88rW=&l|p_7tvRHaWo-sIlBd(X~-do`1A?nzBn=MeWQNuO8X?hf4t?(YsA zvVlWc7XkSQcgfi4+-P0M)~!>HtP2h{VUxuS8V3X-nLB87U3YT}>Oxa@Desm1wJhrj zgU1vjl`EXr6O9&@+`fizBR=UXE{>)Ycz~-`S-|Gwr&qTYSY6`h1Ne2Ld3Jg60ryxB zTD{3eYu2o3CUf)m*L?Z%ynmW8QUOKxx zBoZVKVbMv1CN$vTJ-r^PXMAvkX~Nbyx<%NjQ>T<`UkDoP{aqvHqw;4_SBKkm&$>&{ z1%D@mI(N2+mzj6=0)48yMR8k3i-Hw`@dwyzSe#2oVRAs~FY`Kx>Jm3J<3#wYJ+v+2 zdq#SQhb+ARatj(2MFeH^U-DBZf86^>76eH1F19Nw0Z(QjDZv-^pB0JV*|U?6kLu;~ zRE-XzF=|CwJ#Q=;P-u1YL@rs_p6N~;^~C{ zE4Nn79z)O+a-wtYZ^977@a*4%S_?@CXK4e4h#9!!2{Rg)L9qq(Eu}rQ`Q=lyUgxwI zY8#APbiQC?Y9{j-WyBDC-}LV8|9IeIwA1fyl{33V74@G;biAtb-A)lQgJ_JOA(_?c z)Ja4Ama@!`u6OeGR#0X@`l5nDJMVfAbESL-%?8mYZ;>X3yCast)4H3dl-AfEJ_!>; z(#+;4!KWcH!|kx^B?G7}(9|f>^q{;zF~!8lEg?A7jaTFYEn>n3L%$6A4NHbIV2a=u zL>OEnux;M03qRH&2v`Dn{(9DTA5uCbOM~TV=HQst47Fs{kEEt4zDj;Rg 
z5M@LZW)|EtzjVF{%Sakj}A%7j9p99S2^)r#(k=YQwRWnW7NNy5E-=-ILsKZ$&j zi9-q&h#C#sGU$lu2}4nU@sBd*H`mn+iHfYY_ShUTWbmRSBW>%%&Di}bu4mW z-xtRv#rHAH3pJLK1Wki?Z3}-Y-eQD_-1&^ zCBJnOu$uV}kiFBXzeYQ@WcF12oleqi(UVopf`{5X{gG_u2Y1S^yHbtDrvHije>d2g zTY}7hGJxX8=2IjkvPtAdFb*jEN2I@)HEWhc8Y3rp zPO~f#CtwVs;hj0lcu%ADZQEi%O#^sYnqIm#=s^dRXWIUR?WY@Ac`eELe_YUg zq)lP_L34h`)*ea}-H9se^G?C*k_uAcySl4W1Q zfXtisl=}FT&a9@9Tf%bml>Y0sZWTkguC9m98%_h7`cx24p44%3heTE1sDN0%7Vg*j zFV|G|+&kqn&M?I>-}jzKGDiat_jr|#2tT^Id7T(WWBlkZu4DRxT=xJ&$q4bZX@P{gd1!Qcn-Bsr-9NYgqG z|6R5zLNvu(>jp?S;cU4oiTnLntni!(4r6;D|Mnfgc^M+55VB{XFFrxOiv0+6hgc&V zEj~;5P$h})lc7~FY=ULuNMJS#pM{%$7aF?Z?6Z_)5Cqku2o}82yzE@?o+QI+>R{oz zT|Z>Myd67$IuMHx4n(dk7eu@ltY74;R}Yf50s)VnH9?qS z2~q&F29?+k1=_ROf)##=^LD&bY0%FS43cBv2?#yug%3+c@0JV!d`U!qy@``om+yIx z&H>Owd;HslN`HN5dz#xJU-*-p99c`Fu=xB`ixn%C%Jb`NMWP0=ya@Qa?zYbr6=jSA z4`(Kx8HX6+@b+0VX96jA+CP1^BXIRnMA!1aH8YJ`nAI2h{N3@zWP}osv5sy;=l*O8 zLY?c$DV&m>uH=$5L;G_DW&HJwvy$0_`42K^ebya3&#;i^vQJ1F2C^=c8#<51R^KzP zXAU&6`{{e?>5qsiL4R6+c_X`$y@SguKey*LKJHuO`0RtDs5<_U(LMW{hByDhzth78 z<3w*aW_-vuU`^T>aehO_?E13x4eDlDC&%?K-2ZUo?Eb#b;?Ow-zs!ja3tqq8bh%3x zPhpo(E`A<)zY(~ow~r_JBET~0lBb}(vaW@+geZJ|K`19>QaOKlw0A37#&Pf2;P{ z%ab|B)g=m;;Rl>?N1yqIJU$8#g4NKAvbjP0C$8~#+?VHk_|D!+)Pjf)D60GmB>FME z!|&kokpw%s(gG+n*gm2$=X+0sAeEp-DLPz|>OGwkQYz!VWS{aFPAGgPEyEB_5ZL#V zzRmB0SqafWTZ>RnqL`&=eHSu{9#*2MLH32gxdmhtwQrx-ryaBqd|e%Xb#ucu>?>2O zXKojK)n>M(_tH(vF$z?JHFT$hN2C*08%u(y2l&xaA{}gDrt3*dP66qGK1Jo^Ls(Pw z)L$<6olBa{BWTMUq)y>TXRq#}lGp?5xhYQe!$2#~)CTUiW`H-gmX`kANCkxyQaR?k zYR#GnJw8ywnlU+b2eO$W<|6EXJ;dfIcH#%m7?7@rmXQwZp=^Tzot+7EWjx}`oZ@)M z0vIk^G4*1oZpvjh!*w$Q&)5j{v!&m9moHg zu|4YKQrS*~yAl8IvM;ZC3T?{XBw6zWuEJk5@fXGeTLMuDv+rym^a4>JPo6luS@m#@ z%?wB0v?gKTf&JxgbuXseARPO|Q8-lXg^NbKj-IfLrp5M5#ge6PAgU8lyfy9+k5EYV z5R5&%1Y~(VforK<*RLFz{mA!amwnKgBl2585i8d7i__Ua+EX?va4bY;kE(M{e|Znm zU(JBc*W-zA7*AvuDc9x-PkX)8UVas)T=Ru2 z#OFCZ;RwBu(1vcU|9XMG?tf!tTztf|@8Oh`s>5|Dg3&>F0L_$6F%^zEHQkDOBfcGO zqu(t6x$Lu;`=wf_U3QxicY+IR-M+W^ycZfM(xovcj935B8e*SzWQk}B5E~Lakoe!9 
z_x5*0G=HOv*Ddj@+zSiyWEr9Uacas7kas()M(N!>DR zD#nf-OJNlazb^a5Fa)3$H$mKhPUA)X8Dk0jq9;8z`Eng5Xq-mFBh$;Ju+^%D=VlN8 zCkb&1On$n+$Z3qykofxn8)aHpb-D=q1b5ydj0U3K2drd2at`NnRmQ{J8F%M=t5R9+ zKH<$4zpX<9e{8cLs_phMaW=0U1~}D-JscDF!P#P)=V4cam2r)7?q8U`_ReUhv)|36yXNoq6xB2B;b4@w3$xCtgoJK$ zpABi-u3g%dE2_Ldg@UP4JdNTP6soO-8i5E)Xy@d&k1ryET>Qk$Kwm%3zNxj^uskcLwLrif zwCixPSQ>?moZrjFru?T*ecjAU=eE(0d+3?_rq>%QF}3pLtTH4UmClRMu4ZP-3V5z7 z<4WFg(%i0J{Vh==S#n;n)}2S^sIi8=ew63R+I=`R&&zI`*b><%5U1l-AlW^9MXTxuCe3fA`N_J6Wq~>*%=7Z}RQ`1Z2&=6g?4$0xg+qbo0{8@+-?s608h<>ydpV5w;JELIlsDw2YsA05;x7~bg zDx#S*il575n3txkOO}-od{^u?Fa*bg6H+X;*&ASR3Z`it>`oNRY9_&{Cc4yIQ3|Q) zMf<3$tMkk&gNJuDF|p<{0@7zw@~#&7h{HL|c)~KzFO))wI_G@{QbJ^JyK^N@nbh3e zh9gFdFnmR%!78>SG~2&_|FL7oQn@)0^%#b-UE&EA<^gDBT5r9UP0|LPwVUBWUNoL( zv6)<*Y9b+W+-1(6!GF4u1jHjoj#(qlM$2W(DuPIiOP{0rz z2eWC?q=}+K(oJ8Ud0Ki~W`?)44w2N1!9#}FAbNsF3S{uoRJ`R#l5|jXrfgL=k%^33 zeH^=l8OSmq`Ph!kuTsO|FvPct^K$z1=_sgcbFifYuawfND=R4AYh;9hnMqt;Wg%nLprXSm__#AXnIBKK!nI@x`s5=P?AT>n$8APQD}sswX?Kz zbZWAFi2taGPb{7Eik#E4#>Rd{u*_{?!x9t#yw%XGg)|Wg#n7QcJC94$pk@+KWyRm} zc*QwaQALhnfJAZ~2^Jwy9WI&hC+oS-3g~>evT4kEif!X^vVo3AjU6jLtrr*x^?My> zvg_A?>c=dq;mX^#wzjJktxXhq52M303NYj<`%@#0Zy8ZL<+2R6xCLeuk5Lu{lOIeUD} zakN~g9-N>PQDa+m=whC7O@5d0!q1GclI!Mi|L)z9vIZl!oTDX>+s5?Oix(ZxKzAAH z^)Pufp7%O>fuG=5R&k7XFf`nf*`Q^=4rJbS3DhW{b+(Rl>gRnf$g^qj!hUV`#SMsj zo_cdb&_UmLyOhFy0Y7-^Sy;40FGjoA;mp^Z&vY&YNtWH9u%jt`mnf4X=+1bag&!cHqOyuySLasi3T(nTpA4u)+ zoBYLmOk+h8U0t%o>ng|wtIHjS2+i6ktdLfzgVwHugoMnqF~|GGCZBqpnAi){HC=Fr zFzy}pLVR9(yrJf>BB$pE7WG@#C*$Y#n>B{H%JX=UiuU>QXTy(X{q2XpOF8NZ=&*Um zjvyif?B5|B6Cu08KPJY2BX1QDoFoI_5VGRAR7kXk&|gepIkZL~j)Uj-b0Z$7Cs(+Z z0#G3XXl3)BJx1s80LXF+Zk-6mkS6eY2x2yegsdP{Cv-{n-V6f$HnZA?;RngGmFHec zNaHx;4jt4u4H6fX>eD(N=Pqx&@s&4y-LX@rTs*?ouU~Ic+d^|#!rSCcz2!`rIwae! 
z3l{{DS1V3JLSw9vnxS!XQSII-52F-+HwhswxBVbM9>h=^X*d*$WtE?&JxE@xWv$jB z)PA~$ht=4z&9L!b)F12EooWm)TyJPsks8_<3!Z=3V-g; z$)yFCS@;vEq0GY~RxIx?A7(v${=AUF)U|u}fIWL8SS|Cx^2!sa99QsM>v!&4Z`G<* zA`n8tXn1Ni?I4x?sZVeE(^VWNBgc&6s9A;NnBIOPz85t;67Mz<`ZV(X5)Dn)g)1Ff z>*+_;kqbG6`pG9g2uLBq#ZZ$?TEKE9S;a42$g(?mVF?L6c(ylET_yoXEBx_9^Tek9 z@=FumhCIw-_u>jkUg5D5Csu}s>tg;|&&e6<23VxssF8FSH1}6P17cSE!lAVqPINLJ z$FAMFiC*=>=V#5d8#f-%ZtEnvNb$AsUPJ*$AMfg!k6ccT6(kZjG|AJmj-)u4`<}9X zNotrjii;KFjEKAJ4{+q_;SEhOiPsH>>W^SsyG~71hZSklzN`(3Yi6+`P~LZ(i|eAQ zzUI${wrsmG=wQ2pb^*7O?c5TpbgYwUuVt|;Q6W|w89ec&mnftI& zqdJtmdnXXpC5%P`*(FM@&7ot*k88pgXAUFAlHUXg!;2ts}>b86#`m_51gqK*otRr=n;N^e!z|*-SYOj*8Of9;wg! zHsqNP)UXDUazyt&kk4x!Ox7e1kB(+u4?8E+RpihugmCuqgapRK#31ZxNPbY*qEBxl z5jScx)GwKQ4sP3)lQW1KwQl7~Wt<#F$lyC(K0*PzP*KuJUN*d(KJ6hL`XLSLAo6xf zX|dPx-QLfCZjp0#{*2IzBmMmR;FNY#z8W=XWuXmutwB_Ya}ofG{9pA|zqe>38d!KTB<83vqdHPW$hughs%|$s? zRirieQ;mlD&JB9RRuvh%F6mIH?vO(>7^nBg)tp(~90s{5_F9&;;tzuk`gE=C9DS9R zDqp)*TDz@{&CN~K?OjgY0-&S`RH*zF3tF4)Qfv{rivqYhxPZ|9tuNj+&ao45qC7vo7=1 zG-GRe!gAG5qm`PBn9rc+wPQuH`-@?mCEt6+zc!`Al770PQg>O+oj)tquT~mgS?j?~ z)hja<+uA7Z+NG|IH^{2Eq0+8uRN2v)y|R58&FT0gO}c-1~N zujagj75B_nJgxWGW#+XL){4fnRTkIu4^=++#_7b*^iVl~dFYYr_UY4yBabxYfqAp$ z=uoh}e_)PNV?Mp{8gTx^Eo(*6XMnV?DUgMChN{hyd z9B+dxv@Pj*dGB4lY!gyz{C*t`2QJNAzPx|u8e4Le`rg`7e4&efn_DZM=2@x?YCUoO zwWU1tSVAs*7?r2VEm{2)r=9%q_>p zPubz}KPvs>PBsksX3W|Bc2}+P>Gg)ZC~MxyHSNO0e|jt5cO{P(WtW!t zUjaJ`*iD8sUj6jUdn>WL6=IGA}RDAH@!4gpo+ol;0 zv9U2;Pw9%S*{M^fVy^)M(oJqztKrvCR-ns^F=^4F1q}~-V3imf#e0f*Q;Bb7$C@UR zSA{>F7nA#iJsb3*N{WBcef*;T%_`^a2xDHY~H$i97iSKe6HHx;S z9{gKe@R`6M&)`3s!K2GiY%_^AVv;6U!?oism?}O~y{*?IZVxr#K*PG#u}uVCUX}^)5rL+1|>Oocf`@7o9Ig(W3<@1Ju5Rajd5bqlqo92 zo^r$LwQoO2W?<=&Hnudc$bAKfR3xWHo-EmVOK?gWPdmanxicaTAKt*Ji{U_pQCl7+ z5OK|h4V4uVmHYSA;lZ`GQTAFK>>#-40UDw+8+C3$mwNT;MVHZ7PoW|uE_~CHYb?`$ z>>hsQ1e;kCNqmjB0{cG zR(sw4%le^^)6~)m0GcfXrQFfWJ1WxzL?QR^02>>Z&!2Ym(2w(h6EZ}Wd~-zj0^YlZ z_0H2LPo`5|vTg$ezQ294fHuL3&dAMhgnm>8%E2~txDYSQMKu*b5l-&m(7&no?&&JJ 
z0uwL>I~g`JoxZtzA&=H^tc21zjd4?3L8osuVL~gaC9Y#D)W!&8GB0h}zFqb6luf}7 zc6K2J%Kp4LDP0=Yt^4Bok_393kd8aT!<7^lzLq&i05;>wiv2%c>)j%0+(xc! z<5>`qzN&k^_ZGEzdD*vavDGtOEA05`)7vY}z)%#33=}ZH%1Q~~GP;O~bi>!L zOG8I7#Y~z!xoq2=*7{Ll7rVbGIq0SO_Lm$l=|LQs90itRFyJ<=8uzkg-v#vq8s%om zL7+X|(-VRB*rK?|=U9C=Fkb>(r_;SFWC6iM%l8|&@%yhnOhkZc@#~Lw!-K}CvyXu=ab9u5;NW0+*afl5;$=@}-1r2*>|Isq zM;3*&$z+9ZHu54vbPyIW{`*~8sitPmt#02hiXEY=flNJYH&>2vUdxO~l`x-so-<@0 zR1S^lO4?jWk7g=8@6wt1`!d?IXb&wdEqQ2gw~D!iSqdJRMEYQibe6F$$_g1!diCy2 zx=IVM;e)L3Mc8SWhnFxI4v_KsyvtG=ys1qK1|E%!vJd;Cr(AEMlB}T_Qc?J3(fhW3 zvzKhLoIUmHSDjC0v0o$HtcMNL6!RPxj)!cYbIJA0Ol#S;z*2@$qei*d@3yfCjiD<} zt~I_zi@qu@Q)@lZ?OOw4Q8s2H2N~Ae+k8NA{fn68)Epge+Q)Wh$VXDlY37tvYa{<( z<^fJF^w_SRQp^EDLdUe%)C|4s@E|sq@HJF+t_HmwNppvldh*+E^~g@U9p_q?$MGWb zeGI;`OWD2t2@iO55ukNf@`?p?{EmQMy>=}mbN5d_ zP5kl4{U-Wds^9Lgq3Jcw@2p#|o(+5jlhRs7BWUdG27B*m3Lm|G} zUSe?;?3+thb;yR0agFfQ}e6%jDahFL&^s_ffS_;Vgi z0?&882cQz2f?sqKBRX46v~tU=)voXA*`T7L0^U7f)20T}?>&D$vfXxShDn=t`=g@= zdn={#&V)r))7!jv?r8JorBmNJ#Dws+)=K z?yC@iwL}}&q|dw#+%t6C0iYc!9g?n(<

@8A$1g4~-nFaKVR7n`@>ql0WHnv9U@$;+$_m`=>o{V>zb`jh<;Rq&i@zj3)$~CBFWrB6 zdHEdnliS$ZPR7v?dS_CvK79nMD7sJ<%qHe0aBUkf#Q;CEFr@|gdjPvmohd>fFqU`? zOy95t;UWcin2SsKrT3jXcaDgUUkA|W;ReXbPj_h4f(@fs3O9Ij(*C}79K(P_;@9KXwx69Yx3S>lqei9Dh*AGtIzGaw z%MGQ%OQ6_$Oz4IH6$u1bVxV=TC8C}=U^yP=5tQs>N`X0D4Oc?2GU$D~&|0dh0(6OA zQ0~jT_yay!9Lx=*>w-87`9pq$9SWmHxW72QQ0jUx()nzXD221N} zqqMQ&o|i-fQkjoBpT3-#DeFeiTU!&=6ufSokU1z5rybeF2}Zr-V~9vhY=UNSWRl6_ z#cTwZR>HmY8Hy2iK`Rl^AeC}*aS5bRt?t5XhY#1HMZ5%QvXL^v?pGa(`T%qp^8(Fz z=!j%>K1V3Qu=j{8QUnEULRL8*@oq~$y=`xlPueT`7j=30{Gn_8%Ki@Q%%jq-0plgF zMTx7H&a>@}&GJE_w+iT!4YCdYEnCe%h=o-aeh5M)v{QptpI*%c zO2VmA951R|B#%9zp*o7kWQ*n|BQ>>Oz6+X<6I2=@#YEoP4%R0K+l=HZw(SSTx#?sb z&YL&S?cN7cN;QGAC2S3x2TldJ`Q!EXNJiFEPf*CzMJ|>&=ZP}^{=M^Y)H;nIVK(m{ zI%LQkx(klH2)Y6x5xu>wSiCLIQ;c(Mg(M=wPSzlAfS>}&2p)BIUaX5F-$LBcq^VQO zol7|uhutt^0EaHC*R*L<$Tx~e-($!XK*8ySCLv#xI2yXSh<*D4fS<>mombmhjh!2f zdC^68&f$&c5SvI^7|-a+BS(zLN{l~oosz})B{D1OMH`Y%Ew6P6;8P0-3DGQn|GpbW zaY#GYqRtD^HA#d9kULs41v7b~@B28HN{)3BR~rDS$~k~88#uI%UJaojxJw0vrn{Ao zm7Lv5>ye`x4$E6osJeDZztMLnIN>9GV_mpSR+W4KRmh1Q-d+Y(i4JBp(=oCE8YtL|S5m7|uHgn&5y_|e(&X)U_z|cu+K-<&hj9dTyDJJ_uEM`P(m&uk+L@o!s zjAGk^H5cH2U|tK>zPwiCL#pJF2PgQHe@S|?@P#j<92 z^7_7)_ww@xP)KOz&5`j|c zZpb^y730{F1qb9DZ~Gx4`9)&?O-xb&k&!?``Yxxt85=i1foeRyzvlU6L$W(b05pD) z{%o!mX`f6!)1ulB+2KzGknvd7=L$A%@P>w!pJ~JNS&AI+F@4Q1B&APa3?yOg3#zr&@onRy6Fp(=45upHy ziOoL!kfG^;{d5^Iffu`U@RH5X5vM zKKTp+IFSZ`tth0f!#;4a)d2);IXjLM5T-B~)Pt(Ko_Y*{A4S0whx&wK^J0$glDO}0 znf_ncI)(!$7+Gw5R_(Uh75P<^3tIzuW%}a{noB`OK-*;6<~d*T>VbI4%hANa1{u%Q$NSIPu>2>2Jt|LAn;Fg(`E?b{n2I&|o9 zadE84>0$1I4~{uKb1f|-FCvoH+Y23K%!OK-=PjB>nH)J98_6so+&f*2-R|`GZGVh! 
zLJ5qlMCj&{^r>&{ zB05$=%vt~|ZXiAaVzB;;y!bHFCcgm?@jTS!p)UovAFy~vUoyrNcCGs z@DfA!z@c&dSh?cAX3e0aedjLJ;c_`P?@AJm+j+s^Xjy=xWH8UFcl$oUKvLL2d)-w% z$6r|5n{d@lH!Z%@qevqDutvwdXHlz)X}Wt|zX-y6&=+hs&g>tXyt%8##k!D9)SQ7P zjR|ip%@-u&sHS;$|E9?sq1)pQ|H-Up#W~A~6i6?ClCzo2kaOk;4T%NQ zz3SY#bGd6E{R_Z^9UrU~{D+AEwTgdiY)6Q=Smz65#h~=41zSNHvlRV&#|W2yt^XOMx|_Pl zjL-GW;H;Q5Yu5PL=b4EFGpE7j9$7<~dCZ*PJX)3)GA5@%7L((@T%-dE`_~x=xLPx9 ztYCfvcGIA}b4^Msq$`q9KyY`jhy9i!sJoh(xh3-q>J-X9=v5mIVLmtKd(!lQCX3Te zNOTGYJWf@eGrF2NC-LdUH*a0j{>mf9ww0Ov^a3+4-WBB#Gp-QpHa3B#YRwgxj+^r) z7QKC}B-M~}8BoryNW#S#kfW!~tYTC-IR4y+_+R=()gev4CDph62IrflY%`SbGgncmzSsZ^TNDqGwj2ZUx zecss!_i!X^*6p@R$)HyPQMQ>lu{GM4%)+Ti0Rl)+YsB#+5V%v>(qE8uD=Qeby0ULp zltrkwRGmzsjw7a5Oyml1fmQhc;cl?Cx17>>7?`q}C+bJ_Y^F6YDx+UTuf0rA0BY&??u~ZWrSCw+T#G(b zK6I8*hzx*Ru8?R?cC{L@*C(dP7+6=>fW*XuLV1w3awNaykLF3vR&F$Ea| zD}Yt!dH5k}L9He!%M2hX*{K1nA=32nD-Cni}T{&6`>6r^TDI=g+_=kylUPE>TXwr z3xEgG_zPG%q}|rcFXhE;)Ki^(Rb|I5_v{L;BTw7`p9voc2KxKcx(CpZk^)FwB9|FV ze%ie|(y2pNdS#!N-OS9c0McwkJtCtxy*Lo!q3qK?>@=;-p(Mf)1d5!~qa)uNda{TJ zCxZdMs1`U3y;mVHWN1Z8E^>L9m^{!#rXdxCgb{xj6p}iUKI(fd@_LkgWV#Vv69J$IH0O;zCpS}6RCLI%VO_?qrs1*MWb2pwHRr7VmIDUpiUoud zj)gHnb(M8w!whbkIo~Sgd(>+dwKD=-w>kO~z6Pr^G?~vm^i#0!yyh+7_%hWt1-YsG zu2_v$#RnZoPaX-e$q-1Rd8_W-_lLaa1r0VYt#`I*Md!kkp36N{1+FyTi;8dxh^v!f zsF+YBco3J_g*kq_YJvGh5DJ1I;xj^t7Lf;Q?b}s_|jGFsT%42g+_G2ihQ>#`j zphv*^Nt`~0-2_XZ8>Tf#Mau4y$@ZM~MEA7f{N^*g42}_ff;oaHPAw2}j&v#1En9qa z0ybnFhd-?>(e?k@Igxz=4TXk= z+Ao1|7LopH`V5e-u1JI6G3<+IBtQLj@@6(8Ca+cGO(bxH3G^&dPT9lEYvI=B(cbQ| z?Bnb4^_rThK~BJjUB=}K!di8E76Pu4LA$+TeH+c+xcf|iW=J2NVt|N$1gaJGeeQ4` zu{fp~ZtryP%{3(Cu{u~2JXkFA%kcFmQswX_(5p3-ibr@<@}h3cTi&RuhU~Ry!Q5g4 zIY@!YMh{o~jg;_+$Vl<2%*;QnPAhsD4?=B)#GK3HNo9Bp_)+~XNyhA$(*yRBrUb*! zS~lvbQ|m$tI=)9!D)U+5f>%x1@2y63kj&VkuO-E!8-YsgPY)#xjQ!{QhqmDC6kl6p z0WoE`GE%d%HGnAvYrwR!o00nF<6I2`U*`b?2DgZS6{Xp6@(MdzyC@TmgT+0NqkKs~& zIWmP(mVHmcwx;lhfsUXFKve6`suQLsf1c0KSzob`OhiEk|HkCT(4#^ZW5!h$&=?ox zv$d~IqO2}|^Qaa2wv`(<)(0EK8KW1vYuBzqCgauo5hH>;p&VZdOIp{rAM(8h@D4Z! 
zcbIxOhyNnpE!GJr?%%&J%M~~hTr)G)>#8ukfo4tt@N@o5;%xx8z$B!PNSy;=!;!m= zc=oskbBIN9i}j2ejkL;pj$=U}5HZSjr`ezPy@MrG=cT_OD&n0`qp=lAWH7WZ5H^wtg$5lllcrUyi~UjFPOPl< zf;V3A9TR493m^?WbQV8hOQo5qY3J?;-qaPs6*GKJnmJQ8IL7)BfVKCRWt;BQ+}3<1 zeX+6v)sST3a8oeDej?~O^U{yAW*t5;+oQ9iq4D9qMn?5H8M+cJaX$uDk|cv=lMivO z6)fu8#H&^tecu*4B2u0(V+t3L8*W~$uO0k9ps(G%@wwb>(8V%fL5*y_-`>pZ!>W?H zb?U4HjuE8*1)M;M&#R^^`H9A$bC)iz_YK;#**9dRBjjbknSjy-UG<~xuBctn?B_oL zn`3;+&$`g6%e_v!Es!$Pz$m6jPA+AI#N>Iudp1-}4{55<$RgWM@~J6hmqri4wOoZ+ zud4RE#j+V-eBqNaSRhAgDuAje1r(`Nm*a~{Cj+intF0g&@+(k9qTd%YP?J(xP-Y7& zN5=*nouSLte#8@W3_gz*O%7XKU0vLAF01aK@DCi)E?}IIaO}qxl@{2qi|4)(oiZH< z=7E}eH2@nWXU%>|1ATqNi}@G1mYO*)nehakhoIbte3nC5pmtIf!;8Rij{$@7y3+37 zZwla&f!nexsH(z}4wn{o;>?-rc^8vEy?r8^CXkYfa6^c8=5MH*?F(EKf@?FJ)0J6~ zHzL^_#aEXQ+;)r#Nc^ud_zz2Vuk?JhGyrUmqegf~l1pG(GpKDj#Oc}3+J)Y05>LpE z{>v?f3>_+qe~AqrG5uE{)5%m7Zej#~gO0kLH|%0WNkgeni@GXgAoK^u%n6@n_NVpG z^{Q3)YNCzJ(G9H=h_TjF1l6+)y89j&%)ZSxdMcOs9m<>UxnEUB4~VIaA_z;$_R)89 za)$A7+uh;?X#L?4z+}?;;o|4JOU%}emxDJH3`I;c#%uATxe$3x9ac4IPZ|9&_KSJTTZILB_^<# zVm=GCRo261f3sFY`P>$?V8rZu{J%C7HkW5Tre|BS^xcXd&TMoUa%~{lsz_of`=KAp z-oMwGu>4zsX$u`4gO?Fh80c1Xk_A6*jt(<-*yk{f0!zz?_k+6BK!z4n<8r%B+abEp%@hXqYZN(o7NBtgu(MSG?&h@|X zTMq_H3&-7c^wl?!g>Z7>m30}jo5@{B3wwL}m^?e1^8L9-N))`VpxbH3yZeWD7u_k& zkM?=F#O~i{+!*06bJ(p0Fi5*}sRqv=M4c@^fA8qOiX4_MMZ9y=WT}2s(@oRFau)PZ zgDK?_lG>PJrdCEH@?2XAoeV>Z@=2`5UpWAEj)=@b7Wjb0PZ)?;&P(Kkd>wV-JK2jw z)Q%cDB96K5jwhCMNlGNoBqwp)wxO7SfUbp+S-3ERI3WE{%*z8}T_)4TQfSu55jiBh z>{2g8L$Gu;X_Y)4;ujQ+7*7WwIxU!@u-y!z2^I?7F3a8BeG(R9nb~k|2}!hdYn#x% z$XKW##N7H7f8f5``nz^!6$Fpi(71E;K?OLSR{`m`Tp%$YQ?d?SkpG^lH$`)nb??bb=L`gWSIj zLMU%#6c&FSZP<##hYy?j@#-1$01RnXki)0|dB}-RO;Vs`f)gl&wzQ$V2s1Ok$fq}^ zIk&SnxK(hG$Sva*0?hKFxEMh4$=WOoR}RYsY3bC-VJA-gqsTs#R#_`SCSx=U&B^5_ z)KgdAn^?II#-Bw-u_iYmd_k6SPr>-YWlA6|8mR$lE4c3>f%s{2a(&>bZLt;DcScBw zi(;|;3=J^0--Yi3e{UXqCM+IE{-CJAYJ_Z8uz zdw&1^b|hZ~0dF^ltQv&tfhT4)ci42#?Z4Sy$;c2K1w3}~lTUoq-$nhM68_|fec1i( zOQc*Z=}?Dj#qm^tt&Wj6&*8Gkg5O3-S>I{at=mO9Xc^rYO>m#E$+{+WL$;!WUML=8 
zgI|Fl8vKl_wu0GcB557Y=6nltR$xd-j>F|al%QK0#=g6?UC+`Bao^x*-KKj9KjH0G z-+cu6OKSMEDa<_l;EY|-osV0nX4%N{if3D?ci6@ zy$A0x=-`#?+}t#Pcf*7qz(U||?GJASKSsnQw;Nx#6*sFbtxBo+8~i<&d9>BkboCmN zotJaGd+NFeDj(N>d4Qbo*vxw-pb~|b3w(cw`Iz(#ri+vG(6(y9F~UF$kfDRl;ZEYq zbs>a-TR7k=>fyMy&#}ohLF>t5CWt$vawICMUkDyqRkH2c)FxP_aqCpYrM2TEgHdQW zB)d3Nl2h4sVmK6;G5OxLkkIfijDcOG%t-ne;BWk#^VmZ!a^#{?`C|AtKVZg=Kf?z8 z50dRt+@p2hdt)%KlrVA7r{b#bq{%Utr_L9B8V+@(u^5(fr5`7d@+xdCBs&6*U*t+dr^ceh?S?}#7ci7LE zlnv|GZw?JL7>bGME+C)3b)>&@tPQ7y0Q&Tb^?}OTANk)QvEswZbJ#E>o7}axJ$Nr4#jpIXlWS|yd znyEk=m^*~rr*QLQFQR#eSZ#Ya451w1gW6~!P!bOy&3E8~aAEL_RA^XY$AZX)G-7NeQfz^>3Vua-TCv-5Y zem?c*yR^~z7sX>Pw#s{1#6|qwO72`5TxA-PbOcN>WxOki3;2X+{q6FqkIU5>G{}IF z-2`hSD1|K<=mSav?P)>SI0p~Hru*>n!FeHXKk&R)L{ zx?naSIvcU{kbbQr&nzbrXGQzN-AP{*^D{7c8t`>t{!{PnNR zm}4CfR9vX^ivZtD{L0xT;5SpfHN+*- zp!l-HqiC2WF+%}utu*zbRR$DugBjM-?$f%YKXeQRo#AX}YDp+oP1_tq35JS4=p_I-X)o=GyR( z%o<7x?#)RMFCaDBx4h5 zx}=|8wX*0yWTcY9uuVY9%-u2X>|ZV|Fa6O=pIJJzleS^Nkah!b%f#HV12H*(b|v=_ z6&SZiKv-CqDNS`9#T+1DkM?npR?bO4$xOLxxO8^!XdGjnCZDR0!928k0SH{;l8;9I zTL$@gdU&v1`plLy5DYK!;*Qs@SO=7`ZJw%mmoDmD*@J*73{k#b4!yZ2Ti=d zNP+)OXC`y?LOtm6$;xnX*XCZwMQmxPP_?qK|@<@2BCko+%yYsuh9g{i6AHwSe9 z`BdI~T>2utlIBZTWV6lQ-F1x>wC%RLRU^91e?4Q*z}-`(^|!Mr3I5wClpl2T11$kf zz-EVvSotgx={ znH5!dTI4wAUKW8P2ZiTlA|H5i+Ws*p0H5|Q@$H<}wA2$J91V-aq0ihn$=y9Bw!8z-kRZBX=OhUI>gkItMAmmttrZP>7ZQFR^glah*R5WIkRwyO=@t@;}QcwVNn45V8{`LKm=f~^i> zG9(HE;39w_qaig;3spMjsurKsi>#}4?qLlMSce*L=Wdkj1R7nCq+dDmES&2AeXW|i?8*;mh}TQq7oBFk()P; zI8{4Zydd5Xwubg3CK&xS1?1zx%|UJ(%lqyL8I~8BJ$I33Ri|GZPwZ{0KYt{v1lqmu zj5cgo&IhTc(6_L-Fr*(C&VNHtO8y+66T{to6{XU5<<4WNyl4?)igR1+YR^(qFz zGn0EAaCwZo8@JOXOfXsFEjo7|!ofdi=Lf(`B;m29el(bX=$(ZW3FbU=xCJiT2pvDE zE|ryEytw@hnOLLCKvIM0Q|js^7FxNudgj(o+x67roADRB4_j@bD+P#jc<^Y>Uwje+ zl8QF{oC44;qzgdCjrj5B%w|L2D1=!2?KiQh*Aj@2s*Un`H9HS*76<8c8j;G02WVk4 zZ{8^vajJlnM|*G;!73CAl0`&!!9!cD!<7ytJzN2I#(=TLIx-ceo#5lB6Y3m=xsR{| zxbHuC@}!c2WQG-_qQKfP$Ll3*ijRM(mH#H^tRfZOk6uFIkIqc=UNlzlyRBHe7yb(~ 
zi0hp$8;b0TV&=0NK%_Ec966tpzK)^FaXs6)_q3{h|29XpV5@$DTjD8meSOb&+x}{@hy}B&25xBlKutOpQ1`34~1gKacQdZ~u zX@6?{wSFlgSoB*Y-5_MuUFz^xvC`BJH!n@_&H2>5Oc|T=&bh_ER{ET#`&}+{ZiE7( zLrFqX@%cRCv%vxTx3??9{0uZh?u#_!JM=_v6RRXS7e~k2syf}+^p(rw+V{8R=skl= z>q#0LU;o|R&-ec9-(5FUm_82Wzdt;HNf4ud6J>KtU`akW3_ zj6o=kK&TEq7*$G!IF9yVqn{B(B%80X@d+u3MMbZ7@1$V;Wj#pL+8_;?Q5gRti3mJP zDK3@mPFXE(WHgS6OC5)>)y|LmPf1P!7)E57FTtSEWj$kBAcOAc_x5Dv&4VX*o_2hD z=oQ;6JHHz(+1H*c{Z{Whz1KP96)Q$Nz5C4gV5$0mW;~UchGIpc<5R`-ME=NtOhI)$lw5@@G-BFxrcJ5xzeY4|- z->P_g;M?9|(|XL>`*?J|iUJI0#OFh3QYxCZIh6SQBT(`OeK@$(g6!6~sGz-v_P^?D zK4Xc0Noi8Dsg>&y2J6YRWV!Q@HzE(xfv7l`cF@gOK}gh)oge^>?%cT}5mULz5Ll5& z?D+9xYHD7vKzg8~xP-}Im4-q7Va~2)pNkrMgfm#yRum9zx3*x@cgm?w`WIc>t5HT{o-TRAb`45&o^JV-+RCaL}3UJ>~r?RYx5alV@rq|5Vx6YQugV?w)-$b_%0>(M5OKnce!f1t;*vqWt=(S zGXKjsx4;FiwchTBRkFh6jTyn^+1SyepOBE?s_mM04kMakTfR|@dyiq%gmzd==nXEy z=3Cl{yu~`!C9d;Wt^5K=dXYhLpDGa6LX?XG9TA|{kYGkt_; zK{xc6OV2Do!secX`(GW{cE(&GvINEt(OUvSrGuCRL+(j(fNPUkpFQtixV80kt%GiR zv$7Zrqiw}fidxlk?#+qr;9)I+v6&rKz*34bQDSUd*SsGCj93>eAcHZC%!~R)7+-yK_bv6t{Z7~KHYS%|ROFS+U96jY>3nebu3ZBh z9OADB1=H3YJAL{{)WJBn;?pU1uNq?VKp6g*43C4Z@!L%wA=N-&g1~;{=nL&mufn+| zPGfG*DI!?Sj!+^fU{}vLb#7#DFdI?!(dmc>oqMPhz``26GtH^jkg@%KV2ou*ggWrs zcR{PXsIKp(&S@H#5Q`3?7Bk<96Z#P3zV;7ts9mTKcIgT#jodc?tDe zv}h4}S~@hIgVBZe3%qGnfmN69yDuM|n1V$yJrZAyC~At|^o;4!G7PT*;AQ%iE6+Am z{2-O=%i}>+H(jQArCD6$LgTxO+rHOzXujj(J0G^SCp|=QyX@WKaYOSL8veSc%;mt% zq9m8n*7Ms2%5xgXxS>|Nwk8I7Z4?>W_A$-?bqA!1M-e#ee>zC~!#vp7d;BMXW@bZCt$;a3gZ!_Sguj9K7X#zt<1sl{ zxvp`u{r)*Y=>&td1#_AV5<*?%+ft*V5URR1Up%_UtVhKj`NMZxgoRzMF@7jUiE${Y z)kqo4L6XVo6&qK-IJB?N(;$V@vmWfEAYX2Zt1$M|ANh=VNX~Pi?ekQ>)!BAeRdEUE zF4|qt14OTZP|j%av}v|4U?Ye-8!=%86uV^K$@~TgFCv(j`nEwm_o?rBT3X}VP>*$> zm4Y|RH!xh-;7me&ddQ`6|0$L8zCB_ZEW1;sJD+pA_MX$`6v-&gJ~&Qm%%5uf+-`U! 
z63jce$Ss~=PH3q%NSr1^mNgW$jH%$vqoGq>W<4%C=<($T$NW7dHOaLtY9!>n6g+)Y@CRr0jMKx7a zL|P_xFZ>cp3h=D@gwjBcWpGNZl9=8@ONeNSw z#B25K@k=eOkUCa2UZ-edA{nhE#6%cKVKk7FcI0Wh#OPKHBpsi!<`c03mtiCW0lwt< zv{pmwW*{-CW6sR(f|$Wrv>wNCLVFinstpO_v_NhaVg8j(~*q@wJv{e2GyN2cXF#CHk$}hDqp>B{d#SM4W&NZq;ccMr6^Y8 zu76>~v_Uvay|2kVcj_A*oan zS1LULbg`oPmG$W}#`piD?#;t`-rs%y50-HmGG;0=gffMUm3hio8VzO2l*kY&Da$-Y z8e}L+C_|(oO6I6YC6S2IAe0J~RKLgFT6^zv&i?N6yUum4^BeXb-)k>xsn6&Ae!cGD z`F!5b`ypFsxMB)LojTj}_aH%@Xf+$jJF&mDMKzq&gXrq2edsQ5Z5Af#E|rUBcj!Yu z(;ZIzo9p6j-CTS|P^I1j#8X0%L>!XocUl=aH{!$V;f1T1i<^G|6$_f~EfGI%MnuTy zlST;x*iBjF+bD+ExszWt$I9Z( z39x{I z{**y~h69+^`GY&|k#0z~_1MQ#@-Rpla<`>H%FRthexTq-m^f4-kVw{F2nKCfPV(7) z{rYw8dTVucb&2x}DXI+ZxyUjl%Im)eWj<(ml5~HQvf=w$ATMac6_cOA4tk^P%33vQ zMEgcN#?)w982`TxO??nXxg~1VHe_kjcBj;b4mD%)s{wvaq<=N$)bQWb&(_9=AyM;= z-qGEv=eWMcj=KS3YX0y4Tu7~n)c;>b0um;3uEE7Ghl)a;o^>nY1Fy@Pi1u+iC}T>( zLa)E}%$vGc1$da`!WLs|SBI8TT*HSCznPS8GQ^|&Er)gwoDf6E<|zvz4>7MN|Grzd zGfDXum zyA$OtQ;-Ppap_@t0iH}EpWu)|T8sO#z(;f}XGwKe={#P@-G6j^oY_0f)eXI;Q z;Er-B|M~fPw_qt1po1nhorCw#>cO&~4lH-rk{^YpfzO3SP^4#RS5j8c@}sAY%tl!P zQsELFoajX94qKb!8`W@Mas1QaQ2^ZrFNRakI zFnQROFegtpkEO{dMmmWY$iO#56tOICKKzHwpW(EOL4)jS91!>Rwo?Z(j0>7aVvPJn z!injPlbGxFXB#!`BSlIxYV5YU4ucS9$3D;lixArwAZ2POJqp5eA@NkFd&yq-ho1d%1Oq4JhJr*H?n}jS9ILs!i6-4icJ@pY23;whh7Xo)#*A=NSAF%fre3*ti{pYf4v zC(9gN;Q`1|sDs{r>ZFj~_cRQ$Od{1t9dw!}UicPS*<2y&c$8y|SR-TaTRJKWojf)% zw#W_z=W5b^&>iaaIH$sxn5v$uExYrrsVT&Hh#(JE)fnPVR)<*q$^`uQqh37()ttM9 zw4wBH{Wi`KM+PxW0Z5RijhzaG)%5{!^KkJLKxgaLTJof@6+BJ&Odsz1U5_k} z8Didk$g}yFQ=DV5QkN0~J*O<-05qAvc7WT|XUbcuNox1-O#~LY!+?cUk#KaG&eavZ zq4B#qZP`ubB?1h_UhXn{o=AEbV21&tKZu#cCn#Pj|0^VKRQ4UwnkEH2z}{5OCGFxK zRD2>vk?AV_wq~n!;6-3;J9*msI}U3hTs{}(3Kxx`;GnsWext6si~L%|A()#GxppEa zVS;ShY<1!*FOL$i1w4)PXaZ958$s@`#yVnTCu7)oudnqGfd)Nh7K`V0utSbu;}`M; zBu-{xXRx6GE<=tICE0c=h2`FemBjQ-PB^qhUDQE=>#B&O46p<-8Z?D}5_scR2i$w4}$R@27J^YM|17TUXdyg2D=)dw+f9dvd zH!7pex23kHfA0#84g_;wHm1<);6)2nxn&f(h;LLDECrnt@Qeh{pDzHzlaY5Z8WcAS 
znFm}rj6FlbTZ$9~6i?>D&_C~E@=Z#&B!zZDiBr!XL=2Ko9hMMyR#oFSFV>C{fMj_D zpG*qy*-QBpN@K)qsr9ek;$py_xA~k-nb_L#5;yGD8cCgMb$3d~m0}-9X5k%|S~7g2 z1y}aMM3l%JM(m4?AMVwaYL+L=teyrDNGK%!jw+aJec6DdxI-=W z9EyZ_Y5llMf4lFs-djPR{*NV+`1k>Se*h4YY=#&$IArDcpAi7mO~75nC?=vjhbyCt)>8gT__VN(=J6z;RUGmy4WN2n3WX+os6XA{HV zl|4(gC?0SI&wL^*G*rk#)`*Lu$HQS`>Mf|c>MQ-ifUQS!BFHw>XbP5*2>C%s#RHMK*mif z*{-yZgdpbw3x_Es`dJR_g!A9dC*>q=9hATl4TTc_l!UfrGb53GXd=2lt-%*Mvx`zB zii4_=NHx^pHto zDpzq3nL%qwKGi7c^fguUWgBPexq{;U=YIgxwLxOvfYQfPShLtj!P(Re!#$Efj(m0M^?RZ<=+IwTjO$u;s9@? zeor00_Y_NFvY%8Xoc2umP>mn$I&`=&_pKNS63{;ozx%X|Lr-A*>>cDrd&LFu;>p1o|d?WxF^PxaKGN*QaPZ^GO_is0}o<^nOL?Z2G1k`ip6)Vv?O+Vd?2E zvik+z4PtxzF=dKoUO?dHpRFay5W04i`n5Fha4>N;(gzd<5p+9iXJ;pTJ`|QRO{PR# z7Tkaljha23@|iO<7E26hs9*QicwV1Tylg+%&Ne#O^%GQjesZp{w)-K9SIKw`%Nj=e zxVim>{8h*fWEPHv;TYsUf!w@SOe!BSC$U9>^M#^XPBY_t`=j7B=XgtX&aflqt;AgFwYrT&n z(gMRGI-nTy3&Bg}htb)>x3_jtQ~aB|T4|Jief52~ZXj%lU2oxJuKsh4pTV*!4SM>oi(kMRhnKXV?K(WrAd(!Gosf=E$vmQ>YjR# ztTYszJq6o*K*!U+^ydNA@cSKd)&i72U?+`u#$$jp^H|}RFLRfeo0_H$D3SJ`M`(WR zHe>7J41=y_@~;ASjmo(`r4=)qBa#)CNzw*=2sY3Q0i&b}1Ss;kaG6YeiWo%2V&JVZ z5PaoyTERqyTQ*G?_aH_~mm?PwDJXCo>XCu2*bPL#HYxU&_d%hHRFq+kZ z!#km49}-{ri3^K}f5ye&&}T6tQCk$ zRQ=A`C%wKGQ{T+~g@5TQJ$;=4Wpl^7`+TZo_SxsC?aN@x>WKV{jT6#t^A71 zFUY#@b8~lLh*fZ)owo08izYly#iq>nM?k5Q2Dk=Yyj^9lQ>gdk$imId1Dqc8@ZJ0u zo+uu-rgGvmzE*1FoBQ5Jke)~YkNoj+j@ixiy}+)(!S@knK2Th4=~K~-nEh8+n4NdF zYNyDfd3NuwAAB)^DB|LFH$riT{Yl_nBY<3up80uMY+?6PgZI(?r(Io_{2H+I6|4*! 
zQ^eEYLdR*hSsp{z*fmlajXRif!o`7rd1D%QH2qyPJZ{l~1+^mPS}b``sLVvsLGHUa z@&TnE&dIp32489CV@VfUd|X>T@Xf_8`qA~X!?G9|LC0H_zBFO0+fd(>cjQ!QN@{E~ z6V@Y2%I<6FaHh%tsjx@AQO|*)QrvzmEm2{AkUk?eT&cNk@I>d_uV23!$1)!&2G1-gDBAIP#?-03*9D{?JF7M7 zmlI6}RBnnkrPow+_j+f#*!N7NUZ5P}!dd1O-5BU( z5KgZK5Fhw1IIFlg$*IMZDRKI*d#D99j;w$3#XX{ctb!2V0Iz*fARaw6H0~h~HS&}- zZBm&XQ0?m#C(34w31(SEfX_$S>IB$1(+p~q;O=sEz$Io0Q9j6^boK@#SH0LQW-Q0k z?u4(-==+on@=lonMXpI0F$lhhof!6LBdub6OIMEi)I57)Ds{r9z`+;J)2|9;325Q5 zzJHEEwrJb|U_mlT2euF4;ijtUY^bLIf{QjzW z>{s9b(Px-B>^}!jix0Gho$p6^AYy~GD2DVM+i?_Dr|y^Mf-`E&O313{MyYa-RDH>? zDsO1F84DLI*eXp3vRsU7&+5P4H}~XBiwrM_Vvdx~#(4=BFclF41-dTdRkJjIL{@Yg z=dl$Z8GUJ$hv#pGVEqjgBx6&o!qa$LNLeyOA`{Vg7(zjjf56`D0gWzzAA zuQdSoII50sQZi1AwgiN&G0EV)tI6v9jj95?ctkrB1k^u{K>z% zaqE_stW%4&R{wT1xSOFR+n-ytCM4E^%Oz+azoywJZgI1-tm4z+% z7>E%#ov7F$8H$ffW~*zIOlsnIuP6%x@44%B&-ZgOCwq z+9Q|0&+6Gq9s@i`QzS_MIkMTz>QO6uRjEEf)n&yTMl2$(;E4Awo&Op3s))I6anj}e zj^cI*FbTAh$+Zzr7{&Zp_g*sMB%ZtE&?&Cwno)-D&1JwTvhwE}+0ZfJU`Mg6qn zFlSZPmdjs1Lt5Zlk;qXMSOJlym}erwG%wh?Gq#&o+*OAkE0C<8LcT3ic%Uz~IA92t z2G^po&W|;yjwi=HYUxpWRGXwl;_E|cajCdxV>NwMXK9I;^V38$D+IHs9=)@PxesOM zB78VQ*z9XDrNS)T<7)2s+n`Uj?laAP{bua%Fl0K6Wka9@`E11KO9Sj0sf}B^HU#b0 zMMlt-p&g`C3SFIHK_O;h6+s~T za6V}bnBj23*xuD?*p}ZnsFZhrDJoIFF63Dz`G>2{dq&U6ad+Qi1w^Oe`q?3=YrN0* zwvl)>Cth1RV*#`Dv^V0hkcT(3T+%aE8o!Qev&~UxbzY_D+ld-}96ecJlpQ{Hyy`Xe z?a~H-TbN-v*N{HdfOMq=L;mTo1B_QD4QMNRMWLN#I+6=?Zt5HN@Voqghg5y6l!*E_ zK7~mlWBUa1#I)sND`yGaXtrW3NAJafpG!?s9 zYhiT$H_ zUZCVV_~K?)7moKXoiTlS8w!Ee&}U=VAi_|XN5$?`uFf7ZMnfQCQEp-E(VdSK>lAb*A90Q+OtF1MTrBH2(NZ;6QtFI@S?pI!oIv^tQh?}LHc^V+pYY)GiskFEYvH6? 
zf+piAYsvH5gU!kqsDWm!3pU~cb*4(6KL?1P!JZob_zgyjeLn+_w4^#x22;KFQsykC>ddw->p@n9n{3iFyCGJ_n%plJj3Bcl9e*G zPYl2lXB+Q{1?BCCc#o*Y3{yVnB+Ia&7&baSY#^*s)Nr!>?%mz+TarOzNUosat!ShD zhWx!Jm`1h^Xlo2%Rm_*TV8(WngjrkNv1X|YjIyjohd``{6y2<#WWFd2@=`K&|HEP9 zZ&wKZ_`kEv7@hfI(*KRNQ|nv)U$Tzthsob_1KqT=#KGHG1p#i<8BVLH!}%v;B~gGL zJFWg51!&6dsp_~(K6H!AHtyI2rEW0D5c=gQ(YZ#tSE{?qqGW{kf6P1JZeh~HD=uQ& zsBuOIiUxej-yy&2>CBeDGV-0a>dDiAoN03R-!_o_zw7P6uF#i@;jn_IR>69BI!ONGgL%#uzHvSun@yk8l-VOk$BsE3F2rlKPI z_pQa>(ypV|=l?Wzq0AeV{m&}C@Av98QizgNN;di-0U~9|6nS5e_V|+{lmR@3LU$Rr)xV4aGHzm9@?^&Y^J?;~6jGIrz1U$A{0 zmhzx#<0;lX{BN#V8}(##7J^$fR0hbCkLo!Mhpro|=wAN-&Kw7k=iNaR0%CA~DJFa# zb7qV)`ueT$@I=i4LB$lL8|N)58TQplbv%Y z1!a{w8C=#~|0~}rx6>3C|6-+U(55NOmSj+B{3k8ZXoyW#|41)(hhc1{RXCqX-tW#` znzMphTIS@T_vk2;I(D4f`ncJpZ>O}H8>B2im|0KiKp8j(^co7afV4~uwf^PiR>dd) zsVjhC@xCKF@!wOr-bccCVZ&HH81}FVE?=mVNOaeOa-?yWsO}mkn)Fwx;*{<(9+jmSop@m+Z3p zU@s<-f=}r0*}dYwl?2iHJioeVqHqo9JPadFe{fFFvqpBph6|~*&Rr%%%ucV~=?;izgGh zdpW5b(na22_b0OR+qFOzBm*QAE>cw+C%g~Vz@$z z=oeP-r%F$!mgFGAHHGi07ZpCs@(cP^|Ddy()$v-Rt&Ti=F|~Q~b*Z(V&Yk@t{^V4B zr!%4BpEfZ~?=;na?tmYY2gPqT)Y8^``eN;qy?5@Zv>Z3Cwv}@IttA!reM{H;@zni~ zqKX3TV6Rdi$AlA(yO!jP$URXw`5aE9zv+}%Jh+3;5$Si_s4rs@oROx`e@GmS*00ki zG^q_9?1}WjWc@6S6Y-;!n*~5GeT&r$UiO9&F8V%CU|reW27bqdfB$qy#$xLKjdKfW zr_C84^=w!=oziX>PpYDDkYxk4pjN2ls75AYEX0r$#AplFG2Ma5?A5d9^MjSZ4_8@K zcmIA?cy9js;W#L~4zo7hG53g4=3Q(mJ5i2KFfn;)SuYoBgmbx0`Ey$|YxZe$@cy5b zjlwsye!I6=Yu3pj3PobY4>QGH9iP@F`|eI*twIPQfsET=rCU zWsXu4ys;w{Q!dwwdRVDVn+epIF4nbHanbB)br^e>b$d))ZRg4lIKH;(w>`LD!v+nW z|74}${8g)-aoa<;46Bkrtty-T6pFc->beaYSX&)@(kS_iiXc0I<@N9F&;o+ z7*GZEA8Nt2z}}a1FtHa$KgwH(r-!tkp&tiQ0Ep`XTwf9N~fQfmYkbg3})# z>vU_~hqUu&Nx7ArfuZ0J+ zFW(s8QY%eso%a;yU+Z>gZhn^Avz4d1r(){D78lGOF4CFMAx&{eU!zk|^JMR)cQi~s z92}v1W7L2r-_H(tx2dF0#@fGHwlWx}n_tv8Vg8QR<6B)lU~}UAAKl$gO`I{qA@iW4 zcdG5-eVgwOIg!<|yZieCLn=SbQ1@0u&aO26d1Y7J+#D@c{}FGs?l-PeZ}8PUi`{yi zZM*b@{m!Mfck8;m{i5z+^yO;qkS+f0Q}U)RZJ*rMWtm@?!}K+cREGOx4a>LvddorU z;TNlZ(Iq2HZ~RqXRVQX$UgE%M(=>BUi|?w}x_P4?##3~^>I@~YyIX-s<-kFEOkKiU 
zM)d5tn^(2miX@gMTgMXEumc9~vLVuNuXsjSS&irTEyp*50AN)wk9(81`q-kQhYz0) z`%_qOG0P2TRd3r?Xu|cw#kMS)wjc(B1{YGKL|xFYvz19fsh^+3$49=I(|x;AGY{@Y zF~$`cKaVneX|p^m+OdEJxt-qTGTktP21T}cQ!M@VJZhwQGx5ONFFCc9V|BvSn%Mu< zrmP$M>92 z>&m`)XWU{hMDEpo`DLMUhgWwlME<%Ct1~L+)V>O|?|TriFsEW4w+LBmsPmh&G&cqd zY@)FvDJv_J!BSRLK`TP?EX#I?de<|q@M&(wd9EBo(QUt8Fz^?Xq114q(F~|KV4p|x z`=4^hBkm*#n3x!rrE34NdED2+>Is zVnmMn%hR{V`tf>$P7OR65o_#uW!#7@e{_A^<P82U)3pa-L(F9JFaWoc9r=M;BZ*-v>-`X$o&Y04@(nr2U5-%JehY^h3{U-w}Gn%1TUTCQY6C(Zkf@ z?vp2bsN<@$BZNvu$FT(WsP&&;9>A&y7M>~mIcf}_e1b$pC?O0q7?~L2;}bS`?r#$g zQU8Gh{oz)5R7`hG-n{wuyz392K0Uxw41t-qDG#N0m&K1Lf#RTYHj#{b_3fL(`%1H? zScUdIgxzXUMs<~PKr8Tl?6W6zc<1BpE>bEaLLcW{#{%zxc52BXygkQXdEj`Yu2sHB0HlbD)31gfb_`Z(yxr* zO%N@)6hnBrB~i!_F6}EQ;2#5T9l@Y)_jcAn0n)wCT$U zJTf_Z^^a&?CXakNv(DqfmxW#i@2z*Kyk0zO>ea+yF+(4OCuK4H{r**K_xBAZHu!pS{ZLOPgbdke8rOSFhdYwJ`_RHBrWA^sUY`nA}Z+4Tm z{bM>>U(}jEUpf2wxPV?~2b~)>;??ONM=f&(6s0@unq#c4n>RO3sp`e786Q4>M%Fbi z51T*W%$GUME6U5;Jzc#DfEtM!O?bvn0nd&A5tZF0wq_rX4IIg|Udc(Pz9*)A>$MlCj~}Rus03 zIaqm8T7>}FVqd=LO>Uc-*yoYaRp(xo&NJsS*TMZN9$6Gj7?inj%Tm1(gP!UG4i66f zP+QsT@t7}H&voxJxm!_qLh=!xFH44Kn%%YPyJ(k+UfJ_QJ3k&t*bshX+Tb>3-x|0o zjT+_s!^ppVzmfr#CEA5Ws)~1;ypHX%@Ejbvplb*3gv~3jobIYpzQeNM_mavT=Qi!F z{IXR0!jrDetS{PcpBUQvTc-2W7jLJ3ytL5W5QU%%It?Zeo zKgog;qbTq&>{w;L3V)O5pMMCrj-SVees5%?5})VSmIV_m>!yxY&c@5`B&wPsAXMQh z*nwBFca>oTvEU448CC-;y0+1v%x+0_x}2ppgtd*y+`{UY+2mpY5^U_`nP;P0SS( z7(hSPn4>`N+clZ4t9C z!Za6RxmN0y#{2f{k$N0z;Jbq^cU;;?1${4c_-JH$vL+Nkha3aCfZDd~F~F?$3GE&e zd48NN+2t{0$o1mQ4dpEu)O}mnw3UI~WS6G$p3yx23Xg%aYTl;J2Gd;9(+S$bj9DFa zC}iDAxQaIR`t@AO*tGSBj%VlQCZjXNs8=?IVZs{Vsq#DjLNF7e-7&faAHIq? 
zKVHJfj258T-Tdp*IVc;gEx$KOA`~HqR)nX)4-z9Q#k+oz9xPCKH#f*Niq zEivqkCy)WttrxrZJb~br+FI5F0>RRreu8y^r>)GmxshO>njZ1f>`7tPnBCF6^lo1_ zs2|I*Ty3;vo$7JEJ?sVUgpQ#yV1IS}Cquw823M3NeUaNY|41=qH<%Y$rs&lo*IKE= z=dMaw(?xZ2aZ6VvKX=wSB>~}Ld&^s11XkIb{j{Vr9AhL0@?p^e^{05ABAnU2JdRc79aSw!YhYK1@!YRk0BchbrJj;G)QJ zyU0*@{)vQ_N{oXuhP8O$J-MK`*&qQlt&>JnF~N$*fB^*QrI=g2X{OVnLHNSDe@C+@ zWdyvHKniI}A^Z0?#x8WqN$Z6R(Wq!6d%1qG!{t7%riO-NkW+9-<QxY=iIe?d)bv zYw}w52ifI|aK(keFrJ{Oz!j|Y`(Ro9OWC$hySIF|CyyULH;Iz>2a8U*^)AJ=j^EnD zipqZcgUR}q+i!Hor3u~rMVs8*T(%zj+jwa=%g$2=AYp|D+mj#F!yK*Ar zA{88Vp`s+CW=AO1ob)~+hW}w_?{as;>rIm4LYU~XY2W~cEVrBH#XUy8^oH+o|rSZ`gKH#rQM0Sg`rB#`nJpL`=Acx))F)Whd>I~WyiMW zIvl8NNqx$}Ml>>D_);Pj^7|5m11(flx4g+k#B>@?P%bu)RDp2+RG+bBycE{0O=Q?( z@bBLy<{5ZNniCLFG;|+1(w79nfSJ?8)MI2*fOOtIhsPDY{fOa()A{ziUfBp2s`eFF zaxo!4`)5aa1Dl3IgI8JQK0bN#YF0NKr5!<)$NyELF{buV7W$6 zs!tdfz-@^x+HA4<#DSf8racS-v570f-&%#)7XYTABSs`cijWD{P0W28NN7pI|B~&{ zyQiPaB2_OH(af*Ezd;#WWA{r4^z-WimoMW%1vf88=MQb(Os}P}YS!5C5dyTld*{<| z>LII_7;|+KFY!}@oDpolPZ3nbXmMcdCYH{l}jW;lH4_o)# zByAY8?W7BJHMOk(+{?RmULBvgWszrThtIXe*O_tt7sbUuR0FV)Vz4~|3J0w+?1w9_ z{%yve!Q+CGa)bZK^O$7(>wZuWdhSoD!JWkaBPGZfoApZ1xDyT4RP~DWs^+^X<~3N; z@9eZAwn2Wz0ST6qTl9F4QR5(6gABI8BQkNz>sAbszCS*+=x+5wQ{%VkUCTQM#@j?k zEH!ChP_($|O(p01e*G_bkl`VUVUJBiuO!KLzpw$9(?coaGDF0&#nj$liZK+`YR^a~G_LKP?s}`?rrmkLmqr9k^ z9xk9}a5oMHPUMPsoPJlO~-aXyQC!NI>)NwLpM9t`MI(D=%*n zKxJt6^Z3l{#?)l^Ng01(hI#q-7CUzC%w$0gf6Cw1)~I095|cf9!t)=KaY92V7}^h6 zwV!;EoP2~;;kJv~t5csM6`U%ZM_8B@F_ywr5(qa3H~=Nd?{H^5q#N1XidbO}{Z_y?s;Ly7lWH zIdtd_qhG{H8A8RMzD{b6IaBAyI3my=ch)87?!x2{&(3o(W44n&w?9700S{Stl12VP!2RSax4lRx;7a- z(4kqFed_baj}M44-1-I7CcSYruuiaZ&Yk%E_`^q!lGuSuR+)(LG1UDze#Pu^RO0M3 zQ0v}(hx~YSCDVQNnuwPq!xl_kA3&CZU|g0=8dT!E(2x;+d7XGY%5n8C<^XWW+R}k2 zQpCDQQ&SUJ`8~SzJ^T0n0jDQ|ZGOYmD_3fC!Lw&CWPzyRUW_rsD2<6~#<3IA*peLw z#za|i1`+{hi>ovc3T&G1F*URN<;8aVVt|HyF{+OHIVN=kvz8KyYHL|qXsU`zF>r>kAWxo?Ni_+XtgWN$r2H5gVH$}N!yTB$ksejia0Xstl@+{rP!Dk*< z5*zmeQ7K)Kw*Hb0^N1O1Hf&ge`$^lz_1MB>9=h!C&xkUxKQDR&Klgj$3p;Qieb}%y 
znSV$OXN3lc8v`8uhg4_$H5S8DCJm2p5)dampg+Vwv-W%@W}v=(@pN}LoO^q-H@jmz zp~V*TZ!0H~%F^h9$pONVM}<-$Jh-5dR1>{4G`fr!aU8L9(ZOd=*_3)N`E=@MYUm~G zy3c-j8R6vn49b78DO2+sbz;hM^-fmjxtH?Q(mzu z<>%*;-1oCXPB0~D^Clr7VOh_2PghlG|9*7y{cO*dIM3P4IhQO}IOM@)>(lAQ@&L3F zuaa&bjI?3GkJU#$jN~0*mpMerYs$1h@rak5c|;T%`5koU+~j4f-Dn^d&j7aS0|y?F zKZOCmxv_9Mg9EHv{y=X*X_}dvyY#!c{2WBSqRV;oXeVf|^`2Kg&Q_t zoj1AE9u3K2JV0RHBrru+{pbhsO`mm!R{eU%CbmQ5pcCG`k#%qi9zA<@@cKK4xMixp zF7=*!;}9LUEOD2xW#RCt`=zB-7VtmfH^Lb4489J2O_*jK;lJ~mQaNUK@lwB7>&6Fe zEM-WXBH;)oMHa!qc{%CVSUL*0vZ_OdLZZ1~P)8rZF1iJJro)tCxGkJc!K0#`%t?Zxk;?(NrpKAx z<^>Yc5`ws7DlAL@9$4*oVPir1WK@!}4l^aigYenjBZ0$Dboc-~zkZ~kbOd-l z88Z?`YSpRN_UZjU0cgn9(DQXqPM^r=u4pa8)>3al29PSWscWbO{ZDWBFAEq-+6HQ> zD_fj0UBe&{ex-u_fU?S_Hzr#lqSMi#tJ4JWxxb~aOTdXYh9YARtf7w1+$nwo7_R3? z%4!L0xkZ%1R&RLoCtRi1jET~};&>vCPAd|?X1`V}67|POUu;)C1Y^xarfIf>p`nZ-Cp)&2T#)BCcdHq$^F45<+GVk&V$LTPuWw z*CU6J%Fr`!L$?h{{*1>P7O_$>@nFZh+z2=xA&5!E!&m@o+h@*Kcuf{KV66ZP(TYlZ zzEQPAc#OD33b6X!2_S;|=Y{hIEs;GUlyBU+T9*$jll5=3@|1bUz-3M{&A{@3izA#r z)S!pZnX272gj}h6{)_dssDiJj-5G!StF{rbIxv?DXDCX}@F>Rl0#ZMF+@1gY=Tjfu zIl7hKll6NASZpb`NMD6j!ML zcf$molKp#k@9sbR98#wh5C~T9e7U$&ny3sndQ-E*mGd3gWN-LL~d@a7XL#gkGjZI;tfjYvR_%RI-yVjXcv<49t zG!BB(8*!b$Fd$i^)N5=L6#6PX;+nxiZTXwMdxzQ}x#4lUSX=wyI!rb@rAu=gTDQR`FlK%Tm2wH)cf@L>dD}2O(UsO)@@YqJ@DiHZ5v;oJK@2L z@6P?4`HjO+V$qzX{Opb%NhWK+tRI35P1e_6h!jOQ5?SymxFUKx830O15M$xxcIk6- ziT7(ZZmfUj{(V__%Z`=+U3`zd`!A@?Igz51fPBQwZ46xzC}4o6#;sYjHO$pxz2MY%d(wY9@2kU%!@zYpQ&Atv)IBhEcgEDkdb2-n9EhlA>SG&kCF*{P z$6a^lzXw4_g(Af$WrNX*s`4n7IJM>3vAgUT;0DsJabMX+Hu1eb2GCm zq(DBCU{46g_N(e1BPrBYYS~iCRlbrxQA(siyt-LtW{;81(4jt}?uO)%1>Be3k#uJn z8Htk^CuRv|G_$*egK7C7#?ZpbfU8KsZYTSp>2#=Ku9AnVegr#H&3Ln7m9(On>X?k} z97^fOh^p3L(oGqJl&JLJGu$)O_#h;nWZE|ig6UdoFS;K7jP+eR5ML}@y*lAqf}Ue) z6q^_24ezeMZI4FqnutsvvA15mdd6VegbP<<<1{G!PX5&MA(OvM-ZKBcyUBn5O$glX z^Q%&K?XKuMqJ!!;%PlqMh5z}jD@f`;fQnIVlK$UEX}gk#3+?njPkLPU&b9sxb!lMG zaFWaa6MyBX>H0e3+ErOM8{9g5=hjAKQv5rf(F9|T*%NTA8|Tle_~R2n_$3c>5R$`f 
z=HR%poRcHXBmFh=Hb>!aj`BEb%fe3z9F{UX;4RaZ4+QmBFx(^gQR-fjY$Vo{Q%?-N z+9lZHpYL8U;?rp5>tWa)kN{4$OQJ#FKu4qa4T;#=K+W5HZ7@ku~= z!ra4O0i3(>o34=`88DGsL%H`yXGD6e2kDPb2GALE9z&60pFbb>PdwV)sH<*XX1KJ% zTy(`IaQ8AiF@!WwaHtj`8Ew_3P3`A+aswyUxkHPK#zISMjsPxu28h=Q34!i23xUKP zfN!BI5Oj4>(4tXz{n}!AR_eB46Rzypb!7kCR%3pTly7OLd#E0$L9eh&?lMEJU}9pq z6@4sV3)t>|?hj!D>yIK+05L?5RPHM0$Y9Tt$K%zu|B>gH&V^lGP4?0t4v{G8Jcm*M z(xr`Sk3vnhb>C?70%swkNscv1TI0y)uFA|m+Vg!ejqoG%!rZWbbtE0jlml~_pj(W? z6DYTM*3s~vJutU-&z^VaRRLZUkGd-NL4EhpPs!tu{CcTt=sgU$+bC7fBN|~KKj&Wr zwzMyM|5bBx^{?P@at1Q1jvU@Mra{B!<0I_hYTB{ljml4=qtJw_R!wJGl_EsuE~t`Z z7?A3z7K7dG&?hb?YJklpvG3<+!P_}kmAgQQ7-C{khglI|rL2!1$02;hO0@obm~t5c z2kTIE{Gufl?fL?-uSM^TlZuo-WE-G~&{5-e*FVWy^IPH8ZB3>pILLu;5wtU)2iAEO zq@4=c-#sbz?c2BBBb~PdI-qt~{YLX0@CFsO$r5wsFo5jj=5RsE3i^Ijr#2K$TOJqy z-`c=ggp6|iY16ErPQlH$39& zn?OOi_~y3wj|KRpSq-B2oggp5<9Vz z(*wjG<Z-g$sr8K7-(#0$1kblN)k;wLFVwc5PqWU<4g7B%W1T$o39OgL z(XrNiih8+LK*r#OqfrDyY}JxMK{Vz^HfA_^p%jF+isvICFz<;;6H(u>t1$xOmlh_6;t`47Grkn6S4PWZnKuX zs2je#y32}sEP331VzB+N5lxwK{t3q}U0*#=!LI6mT_(M~Y~p0Mp$7VxIDyOvtp#8p zxMWyZn2BoZ)(^Qc{NR|EFFnD*mst_Iesf5%Bhq3Zl1}Ua#6+mjhcwW`0O% zjL|{_@S^JAY0_&7tBOPdM4M8{Sq!!O#nV9{%e|s2e|A7pkeKh*ddxWc@;rZ6WMR1L zf9|)f1XM2>bf+S@zuGvD&boQ`CJ}j=2PH1Ezq+1FbaZs0799S~pOkU`D-zQh!SECs z=U>Mi2JnBO8)}JK0o>-4`@Q-74|#g-oPKT*1@vZ0BkI3e{g*!L2-Ec#RIR^u)X!UV zOFae-ELpYMgKO!GDao!^Vb;pL;jW7oWwv;d~5!48`pZx-x8A0!j{#cAZvyyEwAUND~*vVUwHy~Oh6Z=2(Rlr zdiZdB#dUf}f?wKC?8joLSBEji#N(Gj9Q~}v=N}IK!!m~WI{VXE!^AEM4d!KG_-XM5 ze+4~6^=Vgn2ed-@LQ2m3`t_2EV65}|w}k`{Fi!yy?mlP9CK5^Z6i`mO?3T@&kEW3( zL!*-%b=-3?Ob)1TJGKYV%`gUC%usC|COcw8?9s5{-HZ%herFK_g2#cM(LxPN-O|^@ z?CsCCMYj72RT6-%D0LQ)Ka7GcPI5+0keRGwz}>~K1_a^gz@<%T&R)Hqcl_p2FRzJ= z#|Xt&W$}+@CC3Z~Cg_D7102yS&sTPh0sAEJf zLV?2Gsn$sDdX}^o{2EGD<|R1#9Fq{7c`t3I-bWZ%U+s}fr^P)DFXM4$2V6=_KW=Xm&rSrEFLj!CJtdq(CB+_G&XoB_IPj; zMGyi` z8}i9NGPy^U^`WqE29EIuZs$ty(kcIVWNzf9!w|q))Xn|}k5cuuL}Gy&n5_~rH>`Oa znGE{$rTi`Q7sW)Qq-f{r5x19)F>2l7RK}wp+Gc{};+FhKKv>*c6Wd=TMCE`1@jG`B zV6Pw(RM<3m$$`g0^FF05JMK4Xl?==zT)8M 
znDzN{94GeR$&)8}`1ntq<_^hN2g=z7ca}9V0U7jhEEtCK*POei>z9fp&#qndi6}9M zxq!D&$y60b{!eAJvjc4Sp(lm2z)l1j2+&rLAp{oEmG}9aN8WR#T$RmmF)_P9xCEZR zck3`BuwI1@<9PbYk^S6gyi2kpXUyKm_1;}R*!#$G~bqT z62>AlhSCX)M(lt2+v&v-tF=9@o;u|MyzDKEIT9Q zLTKjE-lQ-)TMX?C^xqDi)Gvo~y@_`;%ZwI!7nzDW1U!JEEZjVXxG z!lD+@;;I{~Hn_!_SwX0%}vpC`57# zJ7ZvEbf(G3=#;gg>>24zexeF<^YxvoJ+us_JiA7v{C4-awP$87D6cU+r2+U=7m}vD&=Z; z_*p(7lM$EV9XR=A6b|jQSGW(}f15@wAm_AK_OBK@K~nK7+(wot21R&R{`A+MG!Wf& zbWRY6*J3Pr;>3wf`GM!qK#$SsJ=J}G>CJ$Um-C(Atnrv1NFOAqNc{03!SUK=CB?Dcw&oY9viW-3o2PvMj_rMAKV@8j$=?}+dnON_Am-k3} zJVZMOg>_EeK!qjDtE>g&L^pg!#Tjnn7T?Rkq!gqS9l(9sScnt1pH@Av$ichaFt_TY z_IQ#>sP_2PE8qO!e}8P-MlpI3-BL{c2k!+NOf$0_J4~D6NUeMzcJ(tG2(Y_UbYb0= zQHBcjO9kP^R0iskG6rpMRsTTdmf*(1gz0luU`$fznViVC(s1+DEA;TKSDRI`~gM*&C{Mr@&eZRUIYTmVE&<=5dd2_yqN((XVkAI^&klN}J`*CRR175aD`TT<{KWom-m7Hji0MUgKfC`sAjR&!1 zO|3W!0wVYD*)-;O2Om$*M<|To@lH{hLUvYXAM>M9W)~`XDHhWbhA9Bn4*A zUP@W{;>{e_d1SrO?&?J zdlH(c8v*JpHhtK!c4ZiqvFd;aQ7r;@pKdpHXJepok6}}pUU-l(%VXy&-KmRrSDZXP zc-rm35X!!P6UrXXE6sM(st%|pfP zun0a!Qe|95rYKOg38RZ=g6KCGbQ2>EEWSX6Q#>8(h^9}3HLNE`<|aJ}6SK7#6H=q= zBqYL?nMbS*Nc1&Cwu?xD;~0W0+&_fuCBV(Hj2%ro4n5@od@ME^&?5XjBv?B-B#-w# zdh`n18qsGveXG>n$Pm!gKsgkZmgXW4xn4Oi16u(EcOvSb7Q|oG+DH%D2KGvp|0GtD zY66&{=N1}XTQUukf;%LbuwrI<)Tf`ABvx6U){6oaB6Xf->Cd0A%O_|>{gXfOh-H3H zvl`G11;{;iGEl_6+frfExZZu6;PEy0ea#e+D%Ed3iv9HSQnNpNkaA6cpntTub%~3` z%P#W@)c*L_M7RCR{|V83Lfjc#7+sUC;-C*O#*3gLagtsKJPhp zmPmy92UQ=Lp{K^vBnL8*{j9_4$OT6K31d>EP8cL+%-G!uyvK)q23JTu|MFz?w=qV| zTb$ZA$1QAX&GqaI>nL0rCoGF4Av|bsU1DP$7wOSy8s;xq@`&#>6(d^NghCa>2@@x1 z0jX(CFcp!;7Jhaa^TXmY#zr3ZOhg-4UcCSEWhxGEp@V5&1^`~tRSYm}yQhaw&mWKa)C7*}{{bRdd1l|Z5L+aa4-|vCKFN6?BVMi8Cs|>{Mvrl2h0lT|lojO1^)uJ-P z2}6~SMs%>Jn$r(^3M`YC{@EtN_`jsY{emDBnniq$ZgD?MZ2|W)E!@{P!^SJ^#+vCb zp7O&ZCJpJk>vZxb323;iN7^`Qd~VUnIbgu`d7eF(*e>c1dLu9(UU0j1O$=N+E4li| zv=o{Sp2<2OqdupmGx&z?T!DYWl;b0@c8e|naF@=Mmp_RX4=h$(OXIH;1Lp$Sos9CK7~60iu(X7z;z|nP65vn5|)Bm z3AIg30eOj|;H%YEYvCWjIfF^5r^wvgKU)xQ;ewDwMErMxx*}$xlfV|%-Wo|u;!iNw 
zDNSD|QeoQHb!@BlUr=zJ24UOua}dl&PMnAbN5d4JA1#6e;Xb$g-bBYBE`6Ml030c@ zf9j0UX|QZH2!OuI(gh3d@(JiE@1l>RGX8J%aVTCM(!Wwo z_5Vj7_n$~Ie}nDNCuh%}Zy??kr3FuUzhUE6603CJ@)tpo37Clp>>K9;b?RS0%E;t? zf3~bfOEwUDXpC%3FPEO>QLO}yp%)jyyXg05`czKd0#M-d?Id&a1qFJ~e%Tiwg+H4> zI2KD*u9MJ3mmMp1Y&zA;U^z1@nxm|?8MgjWR=r_=Tdgtn4h|Y8r=KHKU*;`dKq2x% zZ0FF1i2&#q-7ad1b4FZbV^oAGU0rqjV4HNmI>>%O1`ucTin0i%&%T@%ZgT+J65V-Gg( z>g6{@+`Jhi0;qY6Y$WQ|^WuWQa;!27|6#nTeoev@XL* zD;S}i{g##t`gb1BDUez!2%01i67Etu53BswuOPGnV_Uasb)JkA$gj{0aF-8FEHnfT zHUVi0`)DGRMm@b;(@d6l17y46c>v{IXXEk)mB7W`J;C~d*-L9&0=hF7=+s`wa7!V)A2ud&NCx0jI<*b@D5@nF* z##D?pBRY2;b)eHoi#PACul7ZNlpG$#z2``yUr~3(?slB`tmULduE#={IyHo39Gf zd17bPX9oTI19Ah}wswMBf$4hj9*;njF+@TYpOk+i;@NSp9_9=%3#G&scV9G_%i@mU zSbG97B%c!uVstM%)6y<*bqrXbSw!ctdxLXGu)uTK~y=RAl_GBT;-3L8^+6RDXFZns!rP*;W|$Urv~~S zvwaqG)yYKZ8|>ZwL;M|Pr!#SX+q>ix3a!z-9#Val13d|KR&U$s`0UBC$q1ZfK$mYz z_j&l_$)~7rHciZZ7o0tr+62^#C&Jz&<5dYfF?nodOKJD@4`)waHLa$MkU#!CYpRj` zfvc@cd-v!eb9N!MoW^U9sp?2`czL-f7xR9!{yDvGmAd^~fp|>MWz%o50R8iiIg&=GKNBg z42d$7%8)6Mu@n^vOQfVoB6CVaqE-qEDf3YG`OeyV?S1yz_rLc(_ngPQcR$YKJoZk* z@Av(EhWGn5y$Ae!{gzI{ll6I0QwbbWFT9HL16V)rY>{4V$4g#e!m}n4SS|BrxT?wH5YxLPCp#revVn`*7bOJ2CE*Q4o5rCp0uu;jdvEB)D}~nnwyN5s1b-KAosokU(cCh4-VtI}OkdoVRBAk1 z6$SiK7%&D-m32Tt^%^w_59E5=4Ar*?CpKg+A?t@M4h&RKt*wW{D+La%-?;G=P5`F4 z)S#et>|MyY0KZuuk@D*ezP`zro~X8_xU72ZIiCWI^)VES&FKQp0Y04Cl%Cb3Pmy3# zG(=Bx#;im~#Z^0J@qYDCXEaFR5fQ#vYb54ThN~$sQcg!JB2PN*e#*-C#g;PF17(q2 z)N$#pc(Rmfr&r=I>4t(74PJd}Oc|1dV(Hkv$rvS39jOfh>rz5!b}2QraB~bJx^@QY zjfbO)e7(WnKa@sSpaQjlpS}2U{)-o1mbz?`O)%NU@4%LJAsgckkTQV%QiAA(ZKZu> zVaNW1b8d(pL-h*KjY@O`HH!q#sR20Eg*csoch_PGlMXA-S<-G{YHeWo=eAlnzDzPy9(2&}Kc5^*c z&6eG}M_^7u8a5L!9{seF*mv=Rzv%wT0GgOOxwk<%J$qFdj}9+d8@?Kuk|bk zX}GuQfYPgNWcuOvZ+R7mQPC0_bi(ccr@}g&`8L{v^yk+v*)Zv!KoEobKgaM-hh)Gc zI%&tu%{Pu?ke(@3hGZ9cR+JC;_+WaAz6%EFjW_GYcnz#@qG_Rd=w(nb6r>s7>!Fo! 
za%%+k(QQsjY=PbZcDI>V8q$4Vl6k@T>S8v&-iHQ9;^0#iRHtpy!Z5-u_Zbf zwBYo*p=TEsY%ynjv+84CBT7?)XQiwc(el87Pjp+bw-#EL~YBz~Ru#s?yR z^H7puB$}gn^9|wQ?O=k_NPeK~TMOCR%>VW>?cJ}Gtm9K1eX%-Rgoi8%*R=0TszO#-;OSvKfhO>rCSXaov%F27159q0D zo5|Se0Q#?v8s)zk`k>C}o)aRYg*)3V}(cbRMU{zU?%(8v4<9k4ff&A^j$jlfQZ{a8fyn|D^^9GJ3M^vX}!-7*e!CQSj;ocLZft%04I&CnVH zq#@262z?CUerPKsw2oON9XcF&w0{rK0iP`$A+Y8;q)aPplOF#OE++S=0!4*r<0SHz zg_MsTCp<3h!R)4qV1e{YGcv+PeCNSn-~;BC8n?U)@(h00(`y|LoPYI^kFNz(4e;iE zLQ=SqfQ_bctlMrI4poW=K%7A=%hc@bAl_WUjdNPDUDXg3^zE)a=3R zgc~<+imn2nIStnq?@Pf~B$^IIQgsT2LLj_2$1a;ES(-t?m}7|~O{A@YCiz)xwnIU9 zkb`!YE_M4SWL%q2lyf_^*3qO7c5Ly2r0+omb{kZ7UPx*<=a_41pi=ESn|EKg`+Dzy zp53!W1BZp2=y|TO-hU86*>(KQ)G1v{a^}RGbiCg9xI>~pH!vME`o_kN4k=s}l12jC z5*8P?3+(eyR#&n~%@DPN!14pi)x^#3S$#w1)B^rfty%tseK5?N6lIg~Gj~TbpK8>SggvPui*0 zqHoF0n)gA1M{ZJ%0`#00G^__`JL`=)DDp-rQU>SihjRVEZV$}^fu?eLWR` zrFJnO3mHKdOdD)!G;Z;s<_QmPR_n53`P{)8rOFlyR;vzG`o7P%&F0m$FSlIkucIA2 z+q_$0FNc)cJyuj zhwJ6%3=jET%7M7D9?B2YpI;N&!fau7>ULTZ!AJ6?Vfn37|SKXNz9Mh{FYpnK7sT)Vt|d@Svm zUiZo{=`J<_5<&YytQ6o?NpO_e!(uD_QRwpi)D7FhhIh_~PMX3BZc2cpJQ~iZbD6vM z?Ad?{oHdcT>R5e~$IV`oTasX1f0*TRd(YhVU5#ncmR${ObTBAut;2c)Q1%gO@v2=G z7v$6k_b6Q+w6s}T-(%+*FPr?U_Q(k)Q!QKL^_%+U@toD}>>ro@wU!FegF2LL?_~VJ zExSOfa3{SvqEn~-M#*R$b^NCW=9bvwmoA;f+Vp@h$m!WSTn3j?mUxMhk|&N|H#F4% z{9c4Ju!+Q4qGyu%IMdoq-FVj9o|fzL$mkf!St9P zf185|Xy26@lxE@|WexM%wza$7lFQgMH{NYzwW2=p{dT|maO{oTM9q5mz?n%O@Or%$ z&*5*Pkzy^o7wmcEGpD5yP1^4YDvj8iOa{cxMb}VXy5G=T*CuS4;o7;d$TGqd`RmYT zGuEzI^Cn_Ht;R{$f}w2b>$QF6-)J@IzIwc;?%KI90lOJ`13z-sC~{?wWyzjjmhZ~l z^`k6iVI9T^>?44f%oh(&4Fq~>2Oo7#T=ZkZA$5}-eMWX7+H+pQ7L(ddPtR*$rgEvz zf$hT_BB#3yun5p>VBR#PX{TW5)NbQl~{x&t*CfM9@oAEg7M%#W+8K-C8ydInT`SX$cHsA;2?fRwl z%NS9lJN{v6mqSl;>h?c%?eMN?hZb#i-Kb~RI$*%ZUy_bR^)gsI;L7y5v-*Fro0O;< zWVhM1wwJijcz+nkf2sms?mXoBA0{LPZo!mv6}bHR>wZz&%gvF_{1s`x zAQ1*E5S9ZULJ<=4J}+A@U9eyZjVKp^w620KCwWsGBRV8~{>aBqaFgqikAGC1iIf;H zxowv&o0;=eCI;+~!76_mqBHWNgBZe}{Ov@_`Z?#k<@2J5!`e>4 zDh^a+Cx0X`p=LB2X_m_iq1i79$~O4v-1nu(fkl()Y##WjmM%L|o&$APQ%&s%rvu?6 
zGG1f9`^%26R~-C&z-)Wl(Zja|M@_9++BwK{Tz z{e6<%M8@19aF%(*;I!nZHo#4AON=En?Hf;d?(CJ<0eDXYt_Ywx%TNDyfP(MN^Mh(l zyL!oGe7&56L36rG-k)fF@=*cbJLS2GP#iW21ee*N;9TA^W6aq3qn~5Yy*tqn=0zb6n> zdEdXSG#?9BKB#YZ{kQ|BW9Mswka1lr?^ucTU9M{S^LtWrPyF(WWW?u4iiaqlmQ3{f z)n#CZUcF*7yY<;$^jGl$z1iU#ONRek!ytl5X4_K}Pi?P#D&Iw<5=dPa;^W||Fd-hr z^64x1T`Q|?mhC^v*@m1zoUS68P*Y2Ky@=^rcaV-E^c#U!Ux6gMm4SiAmfsHigct|t z2+pRJiR1{&<7l31k*Wa*JNwX}NuO;%Z2|8y5BLqFI@n4m$J4K6pZ z!0%f1;D_M;efy39E2H78=>BaMien@t=%-GRJkdY?K6>`)m>oqfiY^{NZxc(`1Wyr~&ihHVs?-=Jo<|tY!CnUGj6c{9Z z(TQ+uuLl~y147@E_tEnnHkk~{F=%X(MI71ULkoFfVH6dJu$t!v&`g4E1)xWweVaj# z3(9Y)r}nSqQ@|O=mKu=ITFFB2nbUNW?#|p4j+$C6TSkMaADO43tQ-LrGUpMW=vWqK&Y7T~psx_4v9I|+ zvry6T=;?*FFI>FX8cx*q3#eVD@bx>ky*2(c*yii^z}9bm9#7U)AT#0k>sVOa?tXp# zV^5r)AP)rGXMQvU58X=^h8IfWf;VqQgGrVUXG8Bm$t)um(MsH83yWGChyrc8lbc9` z{zl+42oDadGH1c;0P4Hw?`toi z)UPk#9o>VOdeRvi-$^2b-m+ucuR8MPBW!<$U<_TfXZ{T5uSqd)09c<>TG+879|eJY zf(|H=Jkc1IrX9W$9Mz}hKI~4Ylak;-0Llbw)G;zL5))09>!N)~S5p}nsrZ?eH3(kw-=z;9)*triSQ(sbX8A?Fp45&*tvWOMZlPyFay~B#n2>F~3%4r5D zPx(oNPH}SbLve+X)i&|J7m6hso}I6LH2Z%b5W{!;JUtT(cD_?S= zX>~%W1ar#L($YxAHbG4&padVISUeRpYJ}b{Uk~CUo-~fs!M#4kCXRNGjBbx)(`kNPQ&qj>>5&qzgJunE(R$w}$Epv9DNGsAil$kO{Jj1QagW z7r6<2(8}DuFh>Mj_$&fYYgi__a|cDHppCPDtS3E6ILa8$AF7xlA>S}XPCoZ!&+nbH z44~t}=hB$L5Y?rN-*C0yJ!=lW%Art_zd?WJqy*ptNi+krD;?+kdyV{Qrvx;BgNqV1YyO_QwDiBlA_)(wgkz#!)5FuqdU|qi>~mVx%Tm zBlWTFu{ubPgt5_z>`#h482dM^NYXjieLjMKg-5xXo$wzp{Rw7ABfLDJ!f}0(d*n9x zzfgj5~~4do&=FpnEp~Jzi=5@ zS@17Khrj;63iIa_{?1*~tPn-EF&%7reNa#nb`Gt!48oe@2*1i(UM+ksjU5#_Bu|24 ze~HnYJGYYr%wB+sfHrtYK!lL>@y;>zd3kUm0xX0-^wty?_O~x9VwJnR5m)h3SqvZ45H8KooCJJjzD#Bi@K8 z!03u#o2wuHkh8S-j*=Gzk{sC!7M!qLk;XIOoFFI-`0U5#M{U`h*2)Z{@X|@SF!|#B z>oCm=4(WE1|=wX=y6N>i3WLp$+_p(LN(ozw@NiSb@S7P{DH(-DB_vrbq_S zO<~oE9u>2Al|+XC$(i`XhlGy3C--bVasmSW??J%qNBFTK@dL-_Wus7&6?fVhBS09? 
zG^2}s+pp^lJ9*Bk@6yY8nzOUN=a1@{3l`kjygYH({Fia@(`}+PjQ7RYUsM`5uJQZD zeVlEUN@#z%h1>IP7W#EBR~!!13*Xi2*Ivh>Yz~EtZ<+J8XY1#`p6vc;+wMJUpIo^= zd-BjFja^3fihVt{=vIEoT$SGr)Vcif#gZ9Cg_c%UmvZJO&3tpBUf+lP$D~|b-7m#q z*3CYTxK-}n!k&em50GrAtOwOglv<(|@F1_M->WraQV_FPm>|f22dv!V7mVXnd@@H)+@LdD$_qN8Are7-4Q`u}{fx zjM>)xfg_$VFeD+)In6Nt<>qJ3t(!fZnEAD0B^cJwYel*}sKEuwB=|>YAc@vsdLP3Z z7lLZbU>5Xwrwh=zY^?I2IfB<+R58jC!LPXUi_M?o@|tRzTKV+ zd81ri-*vuy^u?H#BUM%F>xVgiXU4(lY3?Fo30Dr;z3T5786%gm1ZJxWI$3Mo({4nVb%Ej)Br%yXuWW4kEDw~g4U z+zSr)=EJ*wny;(WnAkTUb3-#)J@pkYm7B~ju9g(I_xhJzyMrz7g-H6L$N812?`D<9 zy*d+@6*1zTd%&qrB{4Y{ELT>a7Uv##VQPbZI~V&%_CSn7Ve;0BI*0neTRt6Yzw7kE zjzJ4go!hlLz3IcXO&9pb98Q@ZNn=`>BTFje z-GZ;P3b`2OT;Xb@EA8SY@t7#L(sFXX?i_qM=61iN+<>-@QsP4A}5W%*&qX7nIlyGiiJ) zJbB9J>t*q0<_>=AwfXaf9IdlYZZBy2uIFoyJ>LtLpY0JkOKaT9Ser-3kKbFg;98no zav=(4lTSb;S=v^u``}JdU z$W#^|Evd34;KkTtoi2QN-dqYMC|w4%OoM^nh$IhiuQG`J`@3P~M9(R*pL?M$nsXqR zR(k2;I^atZZYbd%WV(Xs{yvB_6`7k@!wA@t;f{+Cpdmf^i+)eRT}7cGAq`lonP{MQ zV2_^E2#N-7XQ$V$$itzVPWakrKnx-99FbQ`9G#5yeAEXCm*m&OZJXU}c7Xf|qwghE zk+5xA@}0NK&G(ef5Y$;*hoZ!57k=(;?hRwLx^-vrsZ-R=S@abA;|jVDaxD*&*ub^e z0Dm4c>uB?y(6W>vGbYXc!K7QpAethKV1AzQqinKkBw&aDS+iJjGAqTxNsl)ftXPgt!_y zR3o-vRg#Mw$>VL@wUUYyi99-b^^$uQMuBEyrsE@{4(YvEy*%h35KeSv(AvM~0BnP0 z91B&YWYtlS%XyRDB6R6lrLPAbgb(JM)NJW$;7(;bFwFZC4xZn2M@a=7d`Ct3r=*dASW8ONw3ZspIvM)dW=-(Nn@^Q0Qsf+$=JVaB9V55( z9Wka&O=te6?;bWtZmDa*dwuy=J0>|4b8$~2rDXA&$GZW0U73Y*NV(TPOP=atlkHot z%>-hMWIjOm10)!hz2ha(7DaW&_`$IKUMxWGlcCo9=WWVLybfOgKY|NNwkHPHq)pI6yM-I-|h$=DXOZ|5q zBrajyvR&XyF`;|JRXt$Ln|>qq>KNH{Jhz(vwei=L-!Ha}*~5Q*5S8(5*}2Mkpz>28 zbs@4;u2?LCKR>2caYg-#1NZ&yS0et)cSMv9e*9MZC4(Aq8%fyD`CP z_oEB5Zlf@7p&Q>UU#WvE-=8o2k4RM0c40e0DMv&10Gk8=gUEL%PMl@t!}6;I2UdPh zBIp+0&yUU&a0VfY*d$590WA(^f6FK)&KDYmV#+p27)OdvPa3`dUnQTet^d=>E?|Ro zg{Owoo$ZK8gs_9Akh9ExA-mGJV|d`6f5BOv=L!gV+Br*tg6V;G_g{M4@(U+WB#1Cg z?9fkb$df{vC`LRg5mCnD56DcfTD7W3+quJu0=P>cu3UN2H}7F`itSmKO{0&rVE`EA zW%PFnkhY`>BiN?45kDvSU}qB>w=Qq|*8p#Eilq;p^V2lP!gC$3yXX<+Z8XfBxE8+` z$g&GdbUC2{nkLnnMk&Oh;x 
z_5K$rscZj}l+;Jh*SX8>8rf8+l&JNu^2y=j_55m&hnOyCVwb!yx?#F~{zWU*vpq+S zHA(Hfvgb|BAoI?~u6orLjq2>JStnZE>anHqkRirP>M7rPtLAyDz^1G5I#rJ+G1)5< zclGPBC~1~Y{~mcgX3ZR`ie@qLF^_~=!JqgNUz#_WN5?=P&X`cxy2UWjxMw8{%@5!g zi*;wD58mnn@*+pazW97yuNbk0si}=luU?J7wI}>CvktnqgVE8OKn<9LQeV-PN$(K% zwG9jmBtg{m)&0&OuWAI7&(4Z!gaMpMV`CIv`}b>Nk&{_>#V=kkMxg<*p?B`yU9Db) zsYb?V26mXWNwC<>ts5sXQsz@PJ_sx)@)B00 zyugOFs?mwI4V+|an@_&LYQBV$>KP&>ASQ(u^L1GIRSXRcbpp+JXkT_ec(U@7J+zeZ z`0-5uy@-Rb(bFP*kStQg;K75j(T^y^Jl}-+U>$ynl&e>3AZV#U-UBw}$#YX0Br4h$ zTeqosam6WLU*CwC6iPm;S1WNansxSE_#F)a;e{$ISFUujgQg?;OpzrQ5ufnHg%a&q@R}42ZGZ`~Row%lO3lER; z)~hf@6Pa;|PqCJF6>grPwzk2xEq9oOzdyTm>)jqH-~|dVL<|=%UsfhUhXC-c_brGq zNVoH}=%TtT^8xc&t15<@o38^VU|F;Zy8aFu%fJSEHf`FJb3R~t$D`lhokFWjFJF^S z!YNU(eWJG+c6Ho7!Zr&?&;gN`kf12I65ROsUu0Q zia;}OVEcT+}T{M&j6mQDrB_gfmL5SnhYN7 z`u2$-@MKFgoZIW$)0eZ8TOn;gI4B9g`(J!Sz+@z&R%F%$f1n2E1zsH`l*4O@%7@ak zeI8Hww3g3T_&sRJe9=ceNldU_MK~z>X$E6eLsuic>jKi86ajTVLVTO@F>v*6*|u#B zFD$aNCr7gs;b7-?z~lHE#?h!5TcEGH$fr1!J&%J)u`KgY2yPaVvoGR zng3+N{AJqOp#~raOq8(MQy2(@c^v{JslNYcT`Aa~IDrA3nv1$1c%NO?NVMry>6SA;Au`{fyOVH_) zXFp+L#?=|!uNE$}OIp>El)>Yt8e%bJ?_E2O@$la(D6n?Np3>`7UStmWK?*;`pL#-+k#1m zL`VJ=_KZ5Us#!QTv&MZrKA%{;Sy{nLH6`tsHmn=@>nPhNJHLNROsEXHL(G%{ZeK^E zXENTpO(a6{4v@jsmw~IlF6<|xuUZfGv3?oPXIzI~ay7*>let<@!7wgrRmk$7F82Q9 zqYmm4l)|URDI0M*j*s93#UFME^2VES@X#L2^p5%1ATTO^|L9@2^T*DR$KSek?b>Gc zMjQ##ko{(qaNP`{U&}iD@RZ{Q<_j=JaBFa|E>U4^I4;-WFy7T`&Q6^!UHAu?*&{dE zjPAROQCVW&Y+y3Ly?gP!>vrhji|-x5!s<)KScB=mA!BsH0_0}nU-+s(ZtQK$d8)00 zu6wi4NsID^9T?e-heP%fEc3R~^^7VDrZ(khdy3AtS!(|qYIdbb+ zz7B7vdo(~V@igmGr(vxbN?XP+?V9CXvDwsm?TByhj&-_P=(fbdpgvj&o6GRK4s15pI;4A+I8@M z;#(IV?XS>HRjZX85x@4Jb2mG=8o@JKBR3PoNF1yxa)m|HoO zZ!62`{F>2WtK~qZ*LZw>c>~zxVTfDq5P=eQ$aKe#7PFh!QH6=0f?16C$n zk;V>XQNXbxV@U~9;FMBP46?IpLF{E$n$E|GE1F=;8O8~H^k{tLLsjOHYdEpB$3$9> z(P=27gD|tKMn<4QP1Y1MDJL=XRt-f{pngu>gE27yR9FPd=9BdBCq-B_h17XaI{tuX zgDfo6*rk{=qHgK_e3Q$~@A&oHVimcgym$BR?lULUSdB-csscHGcz3EJxFMNv2kmEq z#RkMiDcs5rsZr~J12q+LZ^Nisk8-5q@%1hC?rPj2>KSUe@FcB)9=>>3nZ~NAkYUz% 
zXO$@ym}<8ci-Xs&5i3#N#$Fy-`L4sKuNiHff_R_~l9E9j*??zN`C##m)Igyob>+_+`?mr(v{-X|JiR+r-Iee_*w9b`H7E?x^N3X`>zW(D~k~y>E?=Xxwzt zK5;{oUU7?cR*F!DBW4R`+f0Rp}t^R-6<3MZD^|<>i^kT>BQ>2J+msc z*P_biRU`u|-DOn4+?>-jS+ftwpv03$p;(=8OqpzeS6XUzu{N^Q^8td_Bd$S~;Ay7m_@jVGmJ9oOvja=EUU zoTvLGzRs)cLnFQ!&i?YSu9F>@Eg#Za_@8IsDL7Zxa_dTi047qV71Xh6Nlf!!x>b0z zzu@kDl9gt8^DGDXZ%-1NFFe-*GX&p=w_0zFJLs+#PCN0_ zlkGnEK_HI(?xpiBo38>V7XL#&R7p~mP2O7ogp=vAiPXB}!VQ;s2IZDFoB{?@W9fT+6|wzlVmvyO(KJP&^%=0i-0Q#hU!3b0Cn zV7Va`3TS;fMGXvW=MCgg4(-q}(Co$hHf;{g^6^%;TARfEZGHI25fzYkDgznB%N3mB z-$+lWV;RFM)gwNctsEby|Hs*T`wp)Yb8!2W=C~Iq&_zOo##hqR=m@r_(>sb6P>xIf zKE_2KYp^;W1i20HGw;F+1DEYehF0VNKyDG&p(PN&Na`CYP{p$el7?*v~pkip%5RW7@>9O9UsNh8N#{VL)q49iiIPV+eJ=ggXlwg_?*yeyNeMrESXvLLmer6~r(Q( z)~)YujBR}{BZKC(L!HU-i)&Kva5Hu7+0ztco~r!>I(|thpb$k3dYLT#)v!6R%cjBO zy97~{6@+IutGD$q=k8iO*AfCHZHc7U9W-7mL%~tiNt;_jZc;+-EI?Ut{mV6uMLvWF zx9oEX8;vO&G?dAZV?JEc1$hIF ztI|wKCWM^&_(SA3vH<1Ahn5!pTd)a|LE;~Db$>q_jZHP`M;%>V32@&`9?s2Pt)u!& z={54N?R@%PR^iaEj~+Z|3a^||LSRcZM5S!#UBSWHv@6gKR_E-xtlzrTa(9{0HOg)d z9OnXZ0$t0Vk8+Ab^e*F;a9?jCAh$;A1)tv7BmBH{?b?XopzEZc4BIBTH z+S|*EoJNt(iAJB4O#C7?%UX)rf7UP@f8wv6)!I$hc-E)*;IzAU>p`x@am1c0ViceRGp z(})ksV!Y=R(QDYep5>^L&#kSK4;8s_FfpFeM&r|Gha4{a;RV!4VM#Y{-V}iKgrt`3z{Z=xgl)U*`hK7mb$7zK|?cA|LIj_SLJGSl`?kTerm>G4GZHOfv9zt8*k z@1=|7v&aV{kQ0Rq@|*c50}7EA13j8@ze?!Jr}%|@PS}a%YNg>)p>9j6C~pqp&4;(_ z&3V?Ol#6Jir1d6U@qV0@127Y|&tZ&IeE;<{FR`gK^;EFk=06#L4Yr{mv$@51STp;h$i-Vrq! 
zQVVPifcT7qxA^h7vXQBh$;{*)2aVo6dJM+?L+5<=h=mr{gp5KZAS;=Chp>IGIx7Yg%nF@QX6U zXSI@I=O3Po6@0PEO*_g^qH8E7*Z^f(+smloV;xjXgOA^`H`cfK6rZN1{O!maH#sGN zCFtJOgiZchcS+3A7C!;LE3_;JI<7eNsIP+h)2#ngpj5dsxdS@7oue7zjInNhc2Uu2 zxj17IKiWn{p90vFVvul%svKF0fH#9DL)lT*Th43=3cxxU?>rHP7di`%)Y#|W16N0% zGPQ0%9-ayZJS*xmCJ#bAJH9UJFe-Q*Aq6la&C?@C>%5BY8G8|4>{))k_6JjsEKce2cbg1N1H*6qIO62-$v{z@;bZ?>)ZOjT zr!3xnc3ZqS(|tUGK9uMVughafw_Tgzv9D%5%Vq*;&{5?heqg<(p@|BK0Z#-nxIz%E z06L&E#J8w0S)-9+xY~ta@Mh|rgNF`D0C~~JFx!I@6oZ?N8&j%S73 zlr&@nazu$5$dFI-d7!x47O(lfOWh>8zcc6%gUh|R&j2+|qB}fBpenT$ zX7;C?wcz+`bIL|Q3V6e{wE~>u_z~Oo9qxhgmWJ9<+N7|wVb+ag=kmU$F|6Ei^s85| zHsHCEnPDgc!JpeuY1v{WSNeM<$mk;4 zlWGc3dctd^tB~)5C^xn+XP1eWQ!%JG&(}fT-ki<86=)nozl3D!eq*1wBAXzxsv+NR z!a`s|%$+6;& zr>0iJ8 zY^p>hvTvzUGFbP!0~Oy@sQqPLk=L-8Yt7LTE0Xi_w9vyy6xiJ}^Cl^hiC=ELZx6-o z3W*3o@IpVl8EL^Y1Sm3xOd*UA6%e<%fwgYd$m5pomgm=$SJ7M=GlxW8m%I!1&Yuiu zpk=*FVFAFe_#U&u9>)Yb@D6{ZV>9iWm&Z*!lTOfky?%ZB4)^TPu3bo2m@@FBAiJ;{ zqA;C3XHEg6P)0ust}v=6neQMIcsSm>A~YO`IAQR@h26==XxpwRe0k}B+hC;z+1S3) zc&$)9%IECra&9XIfvrcDIXgLph)Jr**+RehviAeRc*4~2A;1(?Et#nclrlLayXDx_ ztVq4$aKlaoFV=5V+ERa8+C8H;4&x={EFc z(;1O++b$XR3TuW#k+onhT0rIZHZ0rm#t^a%->y4zx28AsK!W5Y9i7rtSnNcq`^hXi7OFB zneOU4+x6^e9q+3hbp0+J7~N47ufgz9wIfUd* zMj!QMj`H5Ud-p%m{T3Jg-u38jTwG)`k2yZ>AsF;Uh`h7&^M>+CN|3x1cGTjWT!pH! 
z=_sI&NZX6Ps5<948D+oic6k#X88z-9m+RngtJa2wEeZC%uG+F8NQSSbYMnYZ&1IVQ zJLnFx6l$<-<-Pm#kvcEuJzd6w#1&_A;`q2;yv=ag5O{4yC1Bdc{;N5<{gkduCv37g z38srFetA6#_VKE(8SeM^CR~Li$&PyiMfam+{raUhIBk1`CmnE@Pn+nRcLa12#0j7& zNTW|#Xw*gJeZ~Fu&kgL=y}QZq;a7V^pMo>3K@qvce zhWXPW)YJsbueXZ4jDDyD#akO_Y(D1{m_-S_$8A)gHOY}{WO`)tm4FtLb&L-TSl%5k zUEOf2npB}T_VsT9I(2hH%rQ^FYgzp@sEaf>vg*JQoF_;JFm(8I^V`971;CrCp5=sq zB31q#CE&&6WSJDp1t0U~gm2Wwom0m8UV%UKVVU$aFPL*b>RKmD_xzEj@3l4Grv1Wz zx#Fx=k6lcIrz0AIL%9O-AYBia=f;`_KMv)b)+%bWII2hKmi1ci+PLJmUmT*^Y(~?y z-LhI%g7H6+pmB-e3(qY2uAWs5id8+~L81oResDqYl{k;5r!uws>+K(Z!_T*PYcRDK7s_+M2WVISyrHz09ctZ+J)O z|5GCddvc8loFN4;wf|O!Haod#De&Iu!W|u<^;Nh96B`ijpKJ{Nl7P@x&{y4CD?mH|C711W&R4CklDQp=-A_yzSRT( z^yV$+QxcIi=HWQbFXHjCYW7u6R!b^9IbG16LBE-4zy zuHCywZ7lKCtrl~zL1L?)55(qxY;~5X;0d&unA;{ibA0&VK{CZ7FySzag^H)#aFE`| zjxF%hT?fnJV87vSPe#2qb^@^($Q&(5Xc-zY@Y`=al$DjwXO=<>hpTqvaHEhMGI#FW zc0)#5cl#}W^ty`Jt4naX;2e4o=h_B6g>8tV;X~~N#tEW!&zC<*LOG5N`)gZ;2FTKs z0wa&ejAT@NVN+>hCHAymjC$w|MyzdrqCbU#y##bto9wV*GF9EGVotU4vDv2mnNL1m+svgOJPy7^*VqoKkgdH zTT}UEU@^5?moJ^Wf8fe^=E*nF4KzDO63M)8^B=rVeBsueJKF~`a>0-NUimKh+fw?9 zC7@DzB#D=encme91xYA%Q$DZOYPqv>RMUYTaq@S#8mHx7_RFVdHL0XwN!s(v=#HTqtca*l09_nxc(Wd20Fgm;XCGg-C3a6!5wInAfa3n`#) zoDpc1)3_={0aZ=4@j(_dXVxXkENRo8B(4y$U)+6+3JoiiUJ9U&3djqix53gkof6aM zvhUd;UgquBViM$XIdY~J#U8AZ67|_J+gP*cxwOKN9f9v{E63#Ms;a3CqcUP?K7IOB zLWd)AjgE(hd!ubS$a|I25;hQON(Lg(v6JjCFudoR=i*&JyIunr2MTf;35d>x=sCP& zV|$@#%K3^8U@$%B6s`h@LK4i(4U`m`xoPOL7t0iAdLT;Oi(nAp3uyu5s)sjO!)r%l z%8c4Bfy@I^5gsd6E&4GC#2y)93BsNu;M5mBUD-Y(RP;e&H7yT(Lw}|tE{H1$H18Y$zgq@+43<(csV9-)Y zZiC=(c9UCHu0>ugE0oz2)sU60hS%XrTL-?r^V}-YEg=@3gm7XnLCRzWo<>x|~_eU1Q?|2?v0ieDPvPW@(4Cyg1jYB5otk z@(N=b)Y%3Kg-CQLwE(zPctz?!v0yJ!l8_l88gul802ARKpN{7Zqd^>zg0=+YSV_Sc z=hjeXBFg9hXryaI?DGIaLlCQ)-X^Q5j@xwZJSw`bS-92LM^~>F?|ufHqe!NZo-%D( z6^=Vj+~Kc|fY3W0PwEg79Qbsxpk}(fb3XJI|;6s>jewk}{Y8H?Ktq09Y8!i%) z8dz_{G7$K&rKz<6S{t9vxfLlpUAV=90P(A%_F`JkMOsCKMQm!%nFj23U=|gyH||1X zezqH75zH!Rp}-NT3Zq7Gl#CFT&(V~B1+DhwOhn}g+**b3%$@FKN`p8r8QrN2c`5=+ 
zf+vDpED#_4udBFjul9{-5mor`opqG6=gsRCW$)hAn}7bDPu%jd!y?Usi*E?de+fl0 z3)Ji$V>o1rmao(f01v)gO{(n9dYYQJ=BFVm6a)hAgfCEdP5$8|$h2$u_vNnKs;(8~ zD|ypx;f17zQP01tHw10jWlmdDW8>9*QKGZ&@z zyJrYFOn@zoVa`!;xspAG3%MO~o3LFHbbIz;vz7hVJhYR#$4&LkW zAj7`5oR3-4CYs;X`!LwlwD52{%BKm#)`g;v$C%Yx>9ddcEZr)U&CV`?9W= znd|$uQ|4ObT4s7Uyxu}_2IM7*82GH&_m4H>)gH8pRERmml&g6Gnp{yO;o!7-h>=c& zkW#Vg2G&9ta|P~Pq2TUE()jO)JnGu#4l^2pid-gWQ%nRNmYflWV6K5-sH$MAoFIt; zePY#RnKpSGOUx>9E~|VKmNzfy60IaMBnMPLG^9|MzqDQ@U>_@C) zO2|KR|2FZl6kxR*x92eRBR`>^7A(R*y2KRaea_Kz?;J+csL)_H*3!k}qp3rQ;}i@h zQ~7!K>?tDwrcIw7-kocZ%T~lXTvsAUeFyD5ve2Z)@16&gsWMO_=eq@Cp1kb3jBg&a zb7ysqE~n4Dc6wfK%3~@&z*IpGz`CbWr_x1up>vQxA8<1z4A2?pDZlxU^%HMZ?zV`p zU5$^}>cNv%uU?&sT4#|46z_}qJU6JB;VHsd(a#M0C*>08$tE_C%nlcjMd(}-pX`H+CRcoIa%S$sHIktqXtw;z zu3aTxx|o`VJ#=cbFd!(i9Om z(p(B&D5U&^w-*K@ohl)>E(AVGrYLv~Q&apvB2dEOl+47TQ0DK*vzv~D&gQ~EwnO@% z`|XPd7`J+AIe3#>X4)WJY3)8Lg9^d-I63X(BNaWCr+a69sJ<@6P7=1 zf+h3GUf31u2AMh8#{!F#pS?>-Shi-&H>5$5nFY4F>hN#ZTnZEn5>bV5Q3M>fi}^Fk z{#KC;it$3iEMf(`I!YzQ1K&%seX4&W$6FnOuAc^CL8f3TL%=kB5!}H9sxeZE+!bEfstF$k>|hn+6&eXH;MI(l5`t>_u^L#D@nF z!Wm+735++_4vktu-SlGheoidWo9x}UPw)Hyo(Z*^1hP_5M;z-)-QaM}_EBo0HY%wu zKkOfNs6mTk9S>jm6s%lkvIzOe(Hl8z>=44zTT}guFbgS>9}hxXSGN|D_`!wMMwyzv zuhrGqvx8@?2x`B=>iTy^es;yzu(4(T#?lVc9BvH?8b6{Q zcXXmg?3oQKh71^JEan$Z3BA|K(guq54eAL~fqKJ+)|KDtw3*9h))m7a=hYM$dU{$G zc|e-o^L7ZiD8%hNyWzuYP={t-@%HhFs?%IUL-0;#wXC+<+8K*&LqqkK93LXH0}@v( zMZYxM;@0uH^h=*U{ehQl{Sx_|#ZZLjkyQ4rmca2?G(P92Kh#PuF+Zu!@CJf=W|tR`)}Bgij^pp>dqTo?(Ce? 
zkr0pPVI=CK$P@S4Gz6E(q$W%Ka0!D78julWE5TG<20G9|I7u-P(wBT$sta}G`09vq z8-VsVpu<8!@eb{%Do{69H(Y2v=rU5TOer&IQ4iiR&a zM1kiFNADld5e&J5D46BNLMwA2Y@L>kG!JAT9B*Jl%!Kvgo)noN{Kp&rrm`)7sSPJZwmdhI~lGsrfP2psBtoV#-x_<8tfExb)2 z@tx`jS%6N|Tw@@&f`~H2pqh#YpiQ0i>(_6fZmUJe9Tkf}W1t2*)72m*q*CkdbkCv&sCfEC#nMqcP)l56Qs}OxPMM-mQCV+w{Xh#mhjozdAp)JOkqz&H z7`UoJX1-(Nr1a2aU>SsR4U~)$goYt#Di99p#4sU^JE>iZU~+rDz4* z#!{t-4;;wwFsm<+vuKfp3HklDriFBN<5&DGPso;41|$?1htaLJMx%mzc3kws@LVT_ z^pWW16s>3l89XiBhh3L6Dxz=ZbD4wgTsClX__5No>%_X6U?e3?FZ@GyZZlYboz3*> zp^xU>bCyQTHNW$!E1y3fc|`7{_ig9BuK2NnU|TI}{mT?y;jX|55tEY!D#+>5plmTg z0<+}plTCtcA_4zLw@uRNjYoo(16}Z}sw$8rN`*_q@PhQy^`JE&Cr+rpJzrK=lEBz% zt@QL%L9_@VnQi|cx@Vs&@b!fGEh~R#B&Yw8ZpJYwK(;iOg!PFnNVGl>($0jN{P&^4 ztiYXncqfRXLU|_;KmzAJEh2q5F(HZ2M`lqSP*N0mA{K_!mmdL! z)pPr?QPWTZ*4r5DA88^njo#3-Nq0mmi<$!nN<>fmMms0hSVpbWic>-Z5aWDjDgk4d zJi}M2AmuuGsVtzVfaRQfMLBUZfaKc$TG|@**oPxs8Yo^I`4$H~E6R8Mu2xWp4HSR@ zFY8kh84*^7@~`4?1`or4Kr^#i^f@S3Y+e2VE)+YNk^)R4MJg%aPnxk1kdDuk-N0p4 zxOv1DLjoKvZ5n>3ZZocKF|+R@bP2>`SI=?}LQzuibJF_j_k2sMdlHW)>AN7}2jm?6!@qfOW*>d}ZTU}D{eOb`CHhB0JkisLw*v_|+3KDHmVOmc2le)PfDUX0 zCrN|U^?#qbRG3WJn0ONY}1i2Lan- z7x#;#uXA0hy?KpBAfLZQz;Ew9h^p(1x z>`m=EzFo3X_(@(fI)k@I>zvrOr z7#CMW=xn{iz4tHL8Ghbv6W#RM}cnGHKZz_}kI4V(}YQ#3^IDelegWcEUEID=N%&H;vc3Lc6+*2B44zB_#Rq@Y& zb2F>veD|WezvfvEd0BP#$F~tO*)VQFYp}3)v}`i(O{}YE!^ggSAAEkLS82A&o!?%M z9sl#Wo;?oQ*pysuBAnI`=?tDDzzO`dO^fAUYmBDCa`fG0 zrU$ItR0;>N-H3E~#fopbo6mnwpOnqQnhrI;i&!*N!t1#%>+<{tBZF%+%wb^cyS=}3 zDvW+5o*$I>QYmsY4&edZjhSr36HLq>-plu(?mc9)-O%zo(cn+uruU;D-(m0r-$E)m z@{zrd(+B25Jb8q>UWh2YKa0cB1pSibVto}av^O4adID=TLfY?KfbBq=`us<&Hb>){ zMBd}UKfnFmFApK*LmR3g@!)reIi5=F42T11VwPz4XH9J^3!*R0a;Ko_e9pF79_CRI6a(h!}kTWhUyrdzS9w0UFElcBaKnPRTXGnwf)=r zY`Zv~ki&_Nhtu%oJ?uE+ppDXSpXB@{pPoJ`oXeKBRO&Uj<{Gba%Ailp5H7nEv#=lb z<>YJz?mBhjMo7cgC7%b!xZO`&K+p3&>^mqycbfYisB8*~R|LjGFUVaS*Y@N|5}IHE zB;i6Hu^pXxR>Y-*s=4L#1SN?tCW-)Q`FAA+wxicxpg!SEQ)n(< zbATrh8WXb}t40r?cgQ(j6y#E-(Tr;1SL;yVTEkcBf%{`J1qn^t0E`+cfZh<~ZfowF 
zw&V)c+5|Q#3G2c3v0Ik$WcTB#z?U=ldx*@V8(@vlv8bEfD7_o?C)ly3@~^3>nMwGE z?eZKpUSnmw3RIq_ZOYDdTnc5##km(N2i>Hu!SlNh;Ntp+Mf#W15)QgOU85h()Yoby3mv;y@B)X+3QDk6zDwfDpLo%G z!7cNW6Ny8q;DMI+y5w<;6>mID_OXRG-?hm)_na&8s$_~o@WiZG>^DlZ9j>&hf`WO6 z=P@#jLpuTi;4D3>LXnr3ckaT4a;uSv8!!p`ZGC-l=2w4NJoSF!6=Kqa=vs%Dv|qNj zGjj^-cD$;$^C@YYcO=dx91%5`Izn z)vM1m$uGR4+MXGFXq9Zg+Md*3cEQ?>9*sm|8H%hsBS|YQgcY|#V_xpy`3jFYdiR(= z?=5u8SNWTI-N-p?iaC4xzM-jL&E*WQzq3SXUFVBPh<167Gqh2ShdyEYA{H^Q4(IJ9 zvCs2A9b^d94DIY~I_KJg9EBeDK+ks+0!WgpC%I0watykLQ-OPWlKToID96m9ra$@K z9IL6N=Mn92_*o$+1`Bh!`LzP(i6NkT7UErieRiC!#D@e>jtn7?%4N6p3`x3yh; zfSJfZ@{J8|`9M}x59ST9sjsiMpyxK^Ip#te*aqYXis}soXGFEvs3oKJQJz)xsu{aVRr(X!W@G)Tm)bp)JG`@yl$Oc>BgTDK zdb)bCX7w`3UeT56?=FN?%gi1!Jp%s^oEMe6#~m0=nI=LUqsjV%gU90jiD3&ZQW$sL z!qtgix9pV;qs^+*Bo7IYlofExyaI5ksku3oAZKhzhg51ZS!;ImN>GNNeHQv9PGT8M z<3wN?u$y%?*tE?5q7xvhKi8z>Um-+$F{wS0I(prSw%SN+7p@e6z>6QL1*TG6|F{Pu zO2hv3pt|8hf)gf4`H}ZxLbcTdP#>F_S9i zBXpZ{-x*RU5O+|+(jg&lZd$^cHPhqU%^veo7S_H3#h0(#O_u5ZQ+JjH+ac8U;E>=| zPPhkyj|B|NJ$FfWtDJ=p*(K?1pe_pF#0KzBV0;VV z-oh+B3K<3#!97phehWMF&RHid68iaq@qSim9ruPYvHk2sN%5# zqLmtj7Po@aE(E3u=vPUw(57I<^yFuSC+F-mX-asbVl-h-9fr{{=9pdl7eB)xR<6ol>{8CMo6A~ZH*Va6-G$Tu zTd1T>+rPkjlA=tEQ5m+0{AKvLz4snG7zdPj$aTe~eSfrIWwCQpw-i(vQBx*Q4p#Ih zh;lU(;7t;ldlyF}KKnK@daM;s*3RDk`yQY80Obr_{hKivIR#s%Ef@}8eiPk-Jb4+R zQqFJ1RY&{~_^@Iz!G&g36BxQ2#5^b!WCXu*<>P?Qwd{-%#&OId(BoKsD2`?8JxUa4 zpfowXrjCGTJXV#!djTcoR%mc%03#6yF7g4zyEI>|@Wc4c=}86ZVbJIQOcwq}vT5mS$wv1tK9VgSh^dEZo2jmxM_+ zvSNNp85j!O|1QEmF&Jf(BJLi%)F-hgZ!2r<(X(gR`On@Lb4JcoGSE~KLonYzL|w5L zz6c@M3qukbz_%hJ?|Uy%bC+~m&xYD1%!<3+-QTnB2bz%#=m%w$mEH{dQ|PBtxu5eI zp|TD{+%Gu>IBE;SOJQ9Z+!)!5t{HGi= { - let now = ::std::time::Instant::now(); - $f; - let elapsed = now.elapsed(); - println!( - "{:25} {:15} {:7.3} sec", - $name, - "Rust segqueue", - elapsed.as_secs() as f64 + elapsed.subsec_nanos() as f64 / 1e9 - ); - }; - } - - run!("unbounded_mpmc", mpmc()); - run!("unbounded_mpsc", mpsc()); - run!("unbounded_seq", seq()); - run!("unbounded_spsc", spsc()); -} diff 
--git a/crossbeam-channel/examples/fibonacci.rs b/crossbeam-channel/examples/fibonacci.rs deleted file mode 100644 index e6f5e89c0..000000000 --- a/crossbeam-channel/examples/fibonacci.rs +++ /dev/null @@ -1,25 +0,0 @@ -//! An asynchronous fibonacci sequence generator. - -use std::thread; - -use crossbeam_channel::{bounded, Sender}; - -// Sends the Fibonacci sequence into the channel until it becomes disconnected. -fn fibonacci(sender: Sender) { - let (mut x, mut y) = (0, 1); - while sender.send(x).is_ok() { - let tmp = x; - x = y; - y += tmp; - } -} - -fn main() { - let (s, r) = bounded(0); - thread::spawn(|| fibonacci(s)); - - // Print the first 20 Fibonacci numbers. - for num in r.iter().take(20) { - println!("{}", num); - } -} diff --git a/crossbeam-channel/examples/matching.rs b/crossbeam-channel/examples/matching.rs deleted file mode 100644 index 5421169b9..000000000 --- a/crossbeam-channel/examples/matching.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Using `select!` to send and receive on the same channel at the same time. -//! -//! This example is based on the following program in Go. -//! -//! Source: -//! - https://web.archive.org/web/20171209034309/https://www.nada.kth.se/~snilsson/concurrency -//! - http://www.nada.kth.se/~snilsson/concurrency/src/matching.go -//! -//! Copyright & License: -//! - Stefan Nilsson -//! - Creative Commons Attribution 3.0 Unported License -//! - https://creativecommons.org/licenses/by/3.0/ -//! -//! ```go -//! func main() { -//! people := []string{"Anna", "Bob", "Cody", "Dave", "Eva"} -//! match := make(chan string, 1) // Make room for one unmatched send. -//! wg := new(sync.WaitGroup) -//! for _, name := range people { -//! wg.Add(1) -//! go Seek(name, match, wg) -//! } -//! wg.Wait() -//! select { -//! case name := <-match: -//! fmt.Printf("No one received %s’s message.\n", name) -//! default: -//! // There was no pending send operation. -//! } -//! } -//! -//! 
// Seek either sends or receives, whichever possible, a name on the match -//! // channel and notifies the wait group when done. -//! func Seek(name string, match chan string, wg *sync.WaitGroup) { -//! select { -//! case peer := <-match: -//! fmt.Printf("%s received a message from %s.\n", name, peer) -//! case match <- name: -//! // Wait for someone to receive my message. -//! } -//! wg.Done() -//! } -//! ``` - -use crossbeam_channel::{bounded, select}; -use crossbeam_utils::thread; - -fn main() { - let people = vec!["Anna", "Bob", "Cody", "Dave", "Eva"]; - let (s, r) = bounded(1); // Make room for one unmatched send. - - // Either send my name into the channel or receive someone else's, whatever happens first. - let seek = |name, s, r| { - select! { - recv(r) -> peer => println!("{} received a message from {}.", name, peer.unwrap()), - send(s, name) -> _ => {}, // Wait for someone to receive my message. - } - }; - - thread::scope(|scope| { - for name in people { - let (s, r) = (s.clone(), r.clone()); - scope.spawn(move |_| seek(name, s, r)); - } - }) - .unwrap(); - - // Check if there is a pending send operation. - if let Ok(name) = r.try_recv() { - println!("No one received {}’s message.", name); - } -} diff --git a/crossbeam-channel/examples/stopwatch.rs b/crossbeam-channel/examples/stopwatch.rs deleted file mode 100644 index 3a8962279..000000000 --- a/crossbeam-channel/examples/stopwatch.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! Prints the elapsed time every 1 second and quits on Ctrl+C. - -#[cfg(windows)] // signal_hook::iterator does not work on windows -fn main() { - println!("This example does not work on Windows"); -} - -#[cfg(not(windows))] -fn main() { - use std::io; - use std::thread; - use std::time::{Duration, Instant}; - - use crossbeam_channel::{bounded, select, tick, Receiver}; - use signal_hook::consts::SIGINT; - use signal_hook::iterator::Signals; - - // Creates a channel that gets a message every time `SIGINT` is signalled. 
- fn sigint_notifier() -> io::Result> { - let (s, r) = bounded(100); - let mut signals = Signals::new([SIGINT])?; - - thread::spawn(move || { - for _ in signals.forever() { - if s.send(()).is_err() { - break; - } - } - }); - - Ok(r) - } - - // Prints the elapsed time. - fn show(dur: Duration) { - println!("Elapsed: {}.{:03} sec", dur.as_secs(), dur.subsec_millis()); - } - - let start = Instant::now(); - let update = tick(Duration::from_secs(1)); - let ctrl_c = sigint_notifier().unwrap(); - - loop { - select! { - recv(update) -> _ => { - show(start.elapsed()); - } - recv(ctrl_c) -> _ => { - println!(); - println!("Goodbye!"); - show(start.elapsed()); - break; - } - } - } -} diff --git a/crossbeam-channel/src/channel.rs b/crossbeam-channel/src/channel.rs deleted file mode 100644 index 5447e3303..000000000 --- a/crossbeam-channel/src/channel.rs +++ /dev/null @@ -1,1539 +0,0 @@ -//! The channel interface. - -use std::fmt; -use std::iter::FusedIterator; -use std::mem; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::sync::Arc; -use std::time::{Duration, Instant}; - -use crate::context::Context; -use crate::counter; -use crate::err::{ - RecvError, RecvTimeoutError, SendError, SendTimeoutError, TryRecvError, TrySendError, -}; -use crate::flavors; -use crate::select::{Operation, SelectHandle, Token}; - -/// Creates a multi-producer multi-consumer channel of unbounded capacity. -/// -/// This channel has a growable buffer that can hold any number of messages at a time. -/// -/// For more info on how to use the channel see [module level documentation](index.html). -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// // Computes the n-th Fibonacci number. -/// fn fib(n: i32) -> i32 { -/// if n <= 1 { -/// n -/// } else { -/// fib(n - 1) + fib(n - 2) -/// } -/// } -/// -/// // Spawn an asynchronous computation. 
-/// thread::spawn(move || s.send(fib(20)).unwrap()); -/// -/// // Print the result of the computation. -/// println!("{}", r.recv().unwrap()); -/// ``` -pub fn unbounded() -> (Sender, Receiver) { - let (s, r) = counter::new(flavors::list::Channel::new()); - let s = Sender { - flavor: SenderFlavor::List(s), - }; - let r = Receiver { - flavor: ReceiverFlavor::List(r), - }; - (s, r) -} - -/// Creates a multi-producer multi-consumer channel of bounded capacity. -/// -/// This channel has a buffer that can hold at most `cap` messages at a time. -/// -/// A special case is zero-capacity channel, which cannot hold any messages. Instead, send and -/// receive operations must appear at the same time in order to pair up and pass the message over. -/// -/// For more info on how to use the channel see [module level documentation](index.html). -/// -/// # Examples -/// -/// A channel of capacity 1: -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::bounded; -/// -/// let (s, r) = bounded(1); -/// -/// // This call returns immediately because there is enough space in the channel. -/// s.send(1).unwrap(); -/// -/// thread::spawn(move || { -/// // This call blocks the current thread because the channel is full. -/// // It will be able to complete only after the first message is received. -/// s.send(2).unwrap(); -/// }); -/// -/// thread::sleep(Duration::from_secs(1)); -/// assert_eq!(r.recv(), Ok(1)); -/// assert_eq!(r.recv(), Ok(2)); -/// ``` -/// -/// A zero-capacity channel: -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::bounded; -/// -/// let (s, r) = bounded(0); -/// -/// thread::spawn(move || { -/// // This call blocks the current thread until a receive operation appears -/// // on the other side of the channel. 
-/// s.send(1).unwrap(); -/// }); -/// -/// thread::sleep(Duration::from_secs(1)); -/// assert_eq!(r.recv(), Ok(1)); -/// ``` -pub fn bounded(cap: usize) -> (Sender, Receiver) { - if cap == 0 { - let (s, r) = counter::new(flavors::zero::Channel::new()); - let s = Sender { - flavor: SenderFlavor::Zero(s), - }; - let r = Receiver { - flavor: ReceiverFlavor::Zero(r), - }; - (s, r) - } else { - let (s, r) = counter::new(flavors::array::Channel::with_capacity(cap)); - let s = Sender { - flavor: SenderFlavor::Array(s), - }; - let r = Receiver { - flavor: ReceiverFlavor::Array(r), - }; - (s, r) - } -} - -/// Creates a receiver that delivers a message after a certain duration of time. -/// -/// The channel is bounded with capacity of 1 and never gets disconnected. Exactly one message will -/// be sent into the channel after `duration` elapses. The message is the instant at which it is -/// sent. -/// -/// # Examples -/// -/// Using an `after` channel for timeouts: -/// -/// ``` -/// use std::time::Duration; -/// use crossbeam_channel::{after, select, unbounded}; -/// -/// let (s, r) = unbounded::(); -/// let timeout = Duration::from_millis(100); -/// -/// select! { -/// recv(r) -> msg => println!("received {:?}", msg), -/// recv(after(timeout)) -> _ => println!("timed out"), -/// } -/// ``` -/// -/// When the message gets sent: -/// -/// ``` -/// use std::thread; -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::after; -/// -/// // Converts a number of milliseconds into a `Duration`. -/// let ms = |ms| Duration::from_millis(ms); -/// -/// // Returns `true` if `a` and `b` are very close `Instant`s. -/// let eq = |a, b| a + ms(60) > b && b + ms(60) > a; -/// -/// let start = Instant::now(); -/// let r = after(ms(100)); -/// -/// thread::sleep(ms(500)); -/// -/// // This message was sent 100 ms from the start and received 500 ms from the start. 
-/// assert!(eq(r.recv().unwrap(), start + ms(100))); -/// assert!(eq(Instant::now(), start + ms(500))); -/// ``` -pub fn after(duration: Duration) -> Receiver { - match Instant::now().checked_add(duration) { - Some(deadline) => Receiver { - flavor: ReceiverFlavor::At(Arc::new(flavors::at::Channel::new_deadline(deadline))), - }, - None => never(), - } -} - -/// Creates a receiver that delivers a message at a certain instant in time. -/// -/// The channel is bounded with capacity of 1 and never gets disconnected. Exactly one message will -/// be sent into the channel at the moment in time `when`. The message is the instant at which it -/// is sent, which is the same as `when`. If `when` is in the past, the message will be delivered -/// instantly to the receiver. -/// -/// # Examples -/// -/// Using an `at` channel for timeouts: -/// -/// ``` -/// use std::time::{Instant, Duration}; -/// use crossbeam_channel::{at, select, unbounded}; -/// -/// let (s, r) = unbounded::(); -/// let deadline = Instant::now() + Duration::from_millis(500); -/// -/// select! { -/// recv(r) -> msg => println!("received {:?}", msg), -/// recv(at(deadline)) -> _ => println!("timed out"), -/// } -/// ``` -/// -/// When the message gets sent: -/// -/// ``` -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::at; -/// -/// // Converts a number of milliseconds into a `Duration`. -/// let ms = |ms| Duration::from_millis(ms); -/// -/// let start = Instant::now(); -/// let end = start + ms(100); -/// -/// let r = at(end); -/// -/// // This message was sent 100 ms from the start -/// assert_eq!(r.recv().unwrap(), end); -/// assert!(Instant::now() > start + ms(100)); -/// ``` -pub fn at(when: Instant) -> Receiver { - Receiver { - flavor: ReceiverFlavor::At(Arc::new(flavors::at::Channel::new_deadline(when))), - } -} - -/// Creates a receiver that never delivers messages. -/// -/// The channel is bounded with capacity of 0 and never gets disconnected. 
-/// -/// # Examples -/// -/// Using a `never` channel to optionally add a timeout to [`select!`]: -/// -/// [`select!`]: crate::select! -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::{after, select, never, unbounded}; -/// -/// let (s, r) = unbounded(); -/// -/// # let t = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s.send(1).unwrap(); -/// }); -/// -/// // Suppose this duration can be a `Some` or a `None`. -/// let duration = Some(Duration::from_millis(100)); -/// -/// // Create a channel that times out after the specified duration. -/// let timeout = duration -/// .map(after) -/// .unwrap_or_else(never); -/// -/// select! { -/// recv(r) -> msg => assert_eq!(msg, Ok(1)), -/// recv(timeout) -> _ => println!("timed out"), -/// } -/// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// ``` -pub fn never() -> Receiver { - Receiver { - flavor: ReceiverFlavor::Never(flavors::never::Channel::new()), - } -} - -/// Creates a receiver that delivers messages periodically. -/// -/// The channel is bounded with capacity of 1 and never gets disconnected. Messages will be -/// sent into the channel in intervals of `duration`. Each message is the instant at which it is -/// sent. -/// -/// # Examples -/// -/// Using a `tick` channel to periodically print elapsed time: -/// -/// ``` -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::tick; -/// -/// let start = Instant::now(); -/// let ticker = tick(Duration::from_millis(100)); -/// -/// for _ in 0..5 { -/// ticker.recv().unwrap(); -/// println!("elapsed: {:?}", start.elapsed()); -/// } -/// ``` -/// -/// When messages get sent: -/// -/// ``` -/// use std::thread; -/// use std::time::{Duration, Instant}; -/// use crossbeam_channel::tick; -/// -/// // Converts a number of milliseconds into a `Duration`. 
-/// let ms = |ms| Duration::from_millis(ms); -/// -/// // Returns `true` if `a` and `b` are very close `Instant`s. -/// let eq = |a, b| a + ms(65) > b && b + ms(65) > a; -/// -/// let start = Instant::now(); -/// let r = tick(ms(100)); -/// -/// // This message was sent 100 ms from the start and received 100 ms from the start. -/// assert!(eq(r.recv().unwrap(), start + ms(100))); -/// assert!(eq(Instant::now(), start + ms(100))); -/// -/// thread::sleep(ms(500)); -/// -/// // This message was sent 200 ms from the start and received 600 ms from the start. -/// assert!(eq(r.recv().unwrap(), start + ms(200))); -/// assert!(eq(Instant::now(), start + ms(600))); -/// -/// // This message was sent 700 ms from the start and received 700 ms from the start. -/// assert!(eq(r.recv().unwrap(), start + ms(700))); -/// assert!(eq(Instant::now(), start + ms(700))); -/// ``` -pub fn tick(duration: Duration) -> Receiver { - match Instant::now().checked_add(duration) { - Some(delivery_time) => Receiver { - flavor: ReceiverFlavor::Tick(Arc::new(flavors::tick::Channel::new( - delivery_time, - duration, - ))), - }, - None => never(), - } -} - -/// The sending side of a channel. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s1, r) = unbounded(); -/// let s2 = s1.clone(); -/// -/// thread::spawn(move || s1.send(1).unwrap()); -/// thread::spawn(move || s2.send(2).unwrap()); -/// -/// let msg1 = r.recv().unwrap(); -/// let msg2 = r.recv().unwrap(); -/// -/// assert_eq!(msg1 + msg2, 3); -/// ``` -pub struct Sender { - flavor: SenderFlavor, -} - -/// Sender flavors. -enum SenderFlavor { - /// Bounded channel based on a preallocated array. - Array(counter::Sender>), - - /// Unbounded channel implemented as a linked list. - List(counter::Sender>), - - /// Zero-capacity channel. 
- Zero(counter::Sender>), -} - -unsafe impl Send for Sender {} -unsafe impl Sync for Sender {} - -impl UnwindSafe for Sender {} -impl RefUnwindSafe for Sender {} - -impl Sender { - /// Attempts to send a message into the channel without blocking. - /// - /// This method will either send a message into the channel immediately or return an error if - /// the channel is full or disconnected. The returned error contains the original message. - /// - /// If called on a zero-capacity channel, this method will send the message only if there - /// happens to be a receive operation on the other side of the channel at the same time. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, TrySendError}; - /// - /// let (s, r) = bounded(1); - /// - /// assert_eq!(s.try_send(1), Ok(())); - /// assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); - /// - /// drop(r); - /// assert_eq!(s.try_send(3), Err(TrySendError::Disconnected(3))); - /// ``` - pub fn try_send(&self, msg: T) -> Result<(), TrySendError> { - match &self.flavor { - SenderFlavor::Array(chan) => chan.try_send(msg), - SenderFlavor::List(chan) => chan.try_send(msg), - SenderFlavor::Zero(chan) => chan.try_send(msg), - } - } - - /// Blocks the current thread until a message is sent or the channel is disconnected. - /// - /// If the channel is full and not disconnected, this call will block until the send operation - /// can proceed. If the channel becomes disconnected, this call will wake up and return an - /// error. The returned error contains the original message. - /// - /// If called on a zero-capacity channel, this method will wait for a receive operation to - /// appear on the other side of the channel. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{bounded, SendError}; - /// - /// let (s, r) = bounded(1); - /// assert_eq!(s.send(1), Ok(())); - /// - /// thread::spawn(move || { - /// assert_eq!(r.recv(), Ok(1)); - /// thread::sleep(Duration::from_secs(1)); - /// drop(r); - /// }); - /// - /// assert_eq!(s.send(2), Ok(())); - /// assert_eq!(s.send(3), Err(SendError(3))); - /// ``` - pub fn send(&self, msg: T) -> Result<(), SendError> { - match &self.flavor { - SenderFlavor::Array(chan) => chan.send(msg, None), - SenderFlavor::List(chan) => chan.send(msg, None), - SenderFlavor::Zero(chan) => chan.send(msg, None), - } - .map_err(|err| match err { - SendTimeoutError::Disconnected(msg) => SendError(msg), - SendTimeoutError::Timeout(_) => unreachable!(), - }) - } - - /// Waits for a message to be sent into the channel, but only for a limited time. - /// - /// If the channel is full and not disconnected, this call will block until the send operation - /// can proceed or the operation times out. If the channel becomes disconnected, this call will - /// wake up and return an error. The returned error contains the original message. - /// - /// If called on a zero-capacity channel, this method will wait for a receive operation to - /// appear on the other side of the channel. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{bounded, SendTimeoutError}; - /// - /// let (s, r) = bounded(0); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// assert_eq!(r.recv(), Ok(2)); - /// drop(r); - /// }); - /// - /// assert_eq!( - /// s.send_timeout(1, Duration::from_millis(500)), - /// Err(SendTimeoutError::Timeout(1)), - /// ); - /// assert_eq!( - /// s.send_timeout(2, Duration::from_secs(1)), - /// Ok(()), - /// ); - /// assert_eq!( - /// s.send_timeout(3, Duration::from_millis(500)), - /// Err(SendTimeoutError::Disconnected(3)), - /// ); - /// ``` - pub fn send_timeout(&self, msg: T, timeout: Duration) -> Result<(), SendTimeoutError> { - match Instant::now().checked_add(timeout) { - Some(deadline) => self.send_deadline(msg, deadline), - None => self.send(msg).map_err(SendTimeoutError::from), - } - } - - /// Waits for a message to be sent into the channel, but only until a given deadline. - /// - /// If the channel is full and not disconnected, this call will block until the send operation - /// can proceed or the operation times out. If the channel becomes disconnected, this call will - /// wake up and return an error. The returned error contains the original message. - /// - /// If called on a zero-capacity channel, this method will wait for a receive operation to - /// appear on the other side of the channel. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::{Duration, Instant}; - /// use crossbeam_channel::{bounded, SendTimeoutError}; - /// - /// let (s, r) = bounded(0); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// assert_eq!(r.recv(), Ok(2)); - /// drop(r); - /// }); - /// - /// let now = Instant::now(); - /// - /// assert_eq!( - /// s.send_deadline(1, now + Duration::from_millis(500)), - /// Err(SendTimeoutError::Timeout(1)), - /// ); - /// assert_eq!( - /// s.send_deadline(2, now + Duration::from_millis(1500)), - /// Ok(()), - /// ); - /// assert_eq!( - /// s.send_deadline(3, now + Duration::from_millis(2000)), - /// Err(SendTimeoutError::Disconnected(3)), - /// ); - /// ``` - pub fn send_deadline(&self, msg: T, deadline: Instant) -> Result<(), SendTimeoutError> { - match &self.flavor { - SenderFlavor::Array(chan) => chan.send(msg, Some(deadline)), - SenderFlavor::List(chan) => chan.send(msg, Some(deadline)), - SenderFlavor::Zero(chan) => chan.send(msg, Some(deadline)), - } - } - - /// Returns `true` if the channel is empty. - /// - /// Note: Zero-capacity channels are always empty. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// assert!(s.is_empty()); - /// - /// s.send(0).unwrap(); - /// assert!(!s.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.is_empty(), - SenderFlavor::List(chan) => chan.is_empty(), - SenderFlavor::Zero(chan) => chan.is_empty(), - } - } - - /// Returns `true` if the channel is full. - /// - /// Note: Zero-capacity channels are always full. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::bounded; - /// - /// let (s, r) = bounded(1); - /// - /// assert!(!s.is_full()); - /// s.send(0).unwrap(); - /// assert!(s.is_full()); - /// ``` - pub fn is_full(&self) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.is_full(), - SenderFlavor::List(chan) => chan.is_full(), - SenderFlavor::Zero(chan) => chan.is_full(), - } - } - - /// Returns the number of messages in the channel. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// assert_eq!(s.len(), 0); - /// - /// s.send(1).unwrap(); - /// s.send(2).unwrap(); - /// assert_eq!(s.len(), 2); - /// ``` - pub fn len(&self) -> usize { - match &self.flavor { - SenderFlavor::Array(chan) => chan.len(), - SenderFlavor::List(chan) => chan.len(), - SenderFlavor::Zero(chan) => chan.len(), - } - } - - /// If the channel is bounded, returns its capacity. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, unbounded}; - /// - /// let (s, _) = unbounded::(); - /// assert_eq!(s.capacity(), None); - /// - /// let (s, _) = bounded::(5); - /// assert_eq!(s.capacity(), Some(5)); - /// - /// let (s, _) = bounded::(0); - /// assert_eq!(s.capacity(), Some(0)); - /// ``` - pub fn capacity(&self) -> Option { - match &self.flavor { - SenderFlavor::Array(chan) => chan.capacity(), - SenderFlavor::List(chan) => chan.capacity(), - SenderFlavor::Zero(chan) => chan.capacity(), - } - } - - /// Returns `true` if senders belong to the same channel. 
- /// - /// # Examples - /// - /// ```rust - /// use crossbeam_channel::unbounded; - /// - /// let (s, _) = unbounded::(); - /// - /// let s2 = s.clone(); - /// assert!(s.same_channel(&s2)); - /// - /// let (s3, _) = unbounded(); - /// assert!(!s.same_channel(&s3)); - /// ``` - pub fn same_channel(&self, other: &Self) -> bool { - match (&self.flavor, &other.flavor) { - (SenderFlavor::Array(ref a), SenderFlavor::Array(ref b)) => a == b, - (SenderFlavor::List(ref a), SenderFlavor::List(ref b)) => a == b, - (SenderFlavor::Zero(ref a), SenderFlavor::Zero(ref b)) => a == b, - _ => false, - } - } -} - -impl Drop for Sender { - fn drop(&mut self) { - unsafe { - match &self.flavor { - SenderFlavor::Array(chan) => chan.release(|c| c.disconnect()), - SenderFlavor::List(chan) => chan.release(|c| c.disconnect_senders()), - SenderFlavor::Zero(chan) => chan.release(|c| c.disconnect()), - } - } - } -} - -impl Clone for Sender { - fn clone(&self) -> Self { - let flavor = match &self.flavor { - SenderFlavor::Array(chan) => SenderFlavor::Array(chan.acquire()), - SenderFlavor::List(chan) => SenderFlavor::List(chan.acquire()), - SenderFlavor::Zero(chan) => SenderFlavor::Zero(chan.acquire()), - }; - - Self { flavor } - } -} - -impl fmt::Debug for Sender { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Sender { .. }") - } -} - -/// The receiving side of a channel. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// thread::spawn(move || { -/// let _ = s.send(1); -/// thread::sleep(Duration::from_secs(1)); -/// let _ = s.send(2); -/// }); -/// -/// assert_eq!(r.recv(), Ok(1)); // Received immediately. -/// assert_eq!(r.recv(), Ok(2)); // Received after 1 second. -/// ``` -pub struct Receiver { - flavor: ReceiverFlavor, -} - -/// Receiver flavors. -enum ReceiverFlavor { - /// Bounded channel based on a preallocated array. 
- Array(counter::Receiver>), - - /// Unbounded channel implemented as a linked list. - List(counter::Receiver>), - - /// Zero-capacity channel. - Zero(counter::Receiver>), - - /// The after flavor. - At(Arc), - - /// The tick flavor. - Tick(Arc), - - /// The never flavor. - Never(flavors::never::Channel), -} - -unsafe impl Send for Receiver {} -unsafe impl Sync for Receiver {} - -impl UnwindSafe for Receiver {} -impl RefUnwindSafe for Receiver {} - -impl Receiver { - /// Attempts to receive a message from the channel without blocking. - /// - /// This method will either receive a message from the channel immediately or return an error - /// if the channel is empty. - /// - /// If called on a zero-capacity channel, this method will receive a message only if there - /// happens to be a send operation on the other side of the channel at the same time. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{unbounded, TryRecvError}; - /// - /// let (s, r) = unbounded(); - /// assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - /// - /// s.send(5).unwrap(); - /// drop(s); - /// - /// assert_eq!(r.try_recv(), Ok(5)); - /// assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); - /// ``` - pub fn try_recv(&self) -> Result { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.try_recv(), - ReceiverFlavor::List(chan) => chan.try_recv(), - ReceiverFlavor::Zero(chan) => chan.try_recv(), - ReceiverFlavor::At(chan) => { - let msg = chan.try_recv(); - unsafe { - mem::transmute_copy::, Result>( - &msg, - ) - } - } - ReceiverFlavor::Tick(chan) => { - let msg = chan.try_recv(); - unsafe { - mem::transmute_copy::, Result>( - &msg, - ) - } - } - ReceiverFlavor::Never(chan) => chan.try_recv(), - } - } - - /// Blocks the current thread until a message is received or the channel is empty and - /// disconnected. - /// - /// If the channel is empty and not disconnected, this call will block until the receive - /// operation can proceed. 
If the channel is empty and becomes disconnected, this call will - /// wake up and return an error. - /// - /// If called on a zero-capacity channel, this method will wait for a send operation to appear - /// on the other side of the channel. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, RecvError}; - /// - /// let (s, r) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s.send(5).unwrap(); - /// drop(s); - /// }); - /// - /// assert_eq!(r.recv(), Ok(5)); - /// assert_eq!(r.recv(), Err(RecvError)); - /// ``` - pub fn recv(&self) -> Result { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.recv(None), - ReceiverFlavor::List(chan) => chan.recv(None), - ReceiverFlavor::Zero(chan) => chan.recv(None), - ReceiverFlavor::At(chan) => { - let msg = chan.recv(None); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Tick(chan) => { - let msg = chan.recv(None); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Never(chan) => chan.recv(None), - } - .map_err(|_| RecvError) - } - - /// Waits for a message to be received from the channel, but only for a limited time. - /// - /// If the channel is empty and not disconnected, this call will block until the receive - /// operation can proceed or the operation times out. If the channel is empty and becomes - /// disconnected, this call will wake up and return an error. - /// - /// If called on a zero-capacity channel, this method will wait for a send operation to appear - /// on the other side of the channel. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, RecvTimeoutError}; - /// - /// let (s, r) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s.send(5).unwrap(); - /// drop(s); - /// }); - /// - /// assert_eq!( - /// r.recv_timeout(Duration::from_millis(500)), - /// Err(RecvTimeoutError::Timeout), - /// ); - /// assert_eq!( - /// r.recv_timeout(Duration::from_secs(1)), - /// Ok(5), - /// ); - /// assert_eq!( - /// r.recv_timeout(Duration::from_secs(1)), - /// Err(RecvTimeoutError::Disconnected), - /// ); - /// ``` - pub fn recv_timeout(&self, timeout: Duration) -> Result { - match Instant::now().checked_add(timeout) { - Some(deadline) => self.recv_deadline(deadline), - None => self.recv().map_err(RecvTimeoutError::from), - } - } - - /// Waits for a message to be received from the channel, but only before a given deadline. - /// - /// If the channel is empty and not disconnected, this call will block until the receive - /// operation can proceed or the operation times out. If the channel is empty and becomes - /// disconnected, this call will wake up and return an error. - /// - /// If called on a zero-capacity channel, this method will wait for a send operation to appear - /// on the other side of the channel. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::{Instant, Duration}; - /// use crossbeam_channel::{unbounded, RecvTimeoutError}; - /// - /// let (s, r) = unbounded(); - /// - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s.send(5).unwrap(); - /// drop(s); - /// }); - /// - /// let now = Instant::now(); - /// - /// assert_eq!( - /// r.recv_deadline(now + Duration::from_millis(500)), - /// Err(RecvTimeoutError::Timeout), - /// ); - /// assert_eq!( - /// r.recv_deadline(now + Duration::from_millis(1500)), - /// Ok(5), - /// ); - /// assert_eq!( - /// r.recv_deadline(now + Duration::from_secs(5)), - /// Err(RecvTimeoutError::Disconnected), - /// ); - /// ``` - pub fn recv_deadline(&self, deadline: Instant) -> Result { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.recv(Some(deadline)), - ReceiverFlavor::List(chan) => chan.recv(Some(deadline)), - ReceiverFlavor::Zero(chan) => chan.recv(Some(deadline)), - ReceiverFlavor::At(chan) => { - let msg = chan.recv(Some(deadline)); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Tick(chan) => { - let msg = chan.recv(Some(deadline)); - unsafe { - mem::transmute_copy::< - Result, - Result, - >(&msg) - } - } - ReceiverFlavor::Never(chan) => chan.recv(Some(deadline)), - } - } - - /// Returns `true` if the channel is empty. - /// - /// Note: Zero-capacity channels are always empty. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// - /// assert!(r.is_empty()); - /// s.send(0).unwrap(); - /// assert!(!r.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.is_empty(), - ReceiverFlavor::List(chan) => chan.is_empty(), - ReceiverFlavor::Zero(chan) => chan.is_empty(), - ReceiverFlavor::At(chan) => chan.is_empty(), - ReceiverFlavor::Tick(chan) => chan.is_empty(), - ReceiverFlavor::Never(chan) => chan.is_empty(), - } - } - - /// Returns `true` if the channel is full. - /// - /// Note: Zero-capacity channels are always full. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::bounded; - /// - /// let (s, r) = bounded(1); - /// - /// assert!(!r.is_full()); - /// s.send(0).unwrap(); - /// assert!(r.is_full()); - /// ``` - pub fn is_full(&self) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.is_full(), - ReceiverFlavor::List(chan) => chan.is_full(), - ReceiverFlavor::Zero(chan) => chan.is_full(), - ReceiverFlavor::At(chan) => chan.is_full(), - ReceiverFlavor::Tick(chan) => chan.is_full(), - ReceiverFlavor::Never(chan) => chan.is_full(), - } - } - - /// Returns the number of messages in the channel. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// assert_eq!(r.len(), 0); - /// - /// s.send(1).unwrap(); - /// s.send(2).unwrap(); - /// assert_eq!(r.len(), 2); - /// ``` - pub fn len(&self) -> usize { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.len(), - ReceiverFlavor::List(chan) => chan.len(), - ReceiverFlavor::Zero(chan) => chan.len(), - ReceiverFlavor::At(chan) => chan.len(), - ReceiverFlavor::Tick(chan) => chan.len(), - ReceiverFlavor::Never(chan) => chan.len(), - } - } - - /// If the channel is bounded, returns its capacity. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, unbounded}; - /// - /// let (_, r) = unbounded::(); - /// assert_eq!(r.capacity(), None); - /// - /// let (_, r) = bounded::(5); - /// assert_eq!(r.capacity(), Some(5)); - /// - /// let (_, r) = bounded::(0); - /// assert_eq!(r.capacity(), Some(0)); - /// ``` - pub fn capacity(&self) -> Option { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.capacity(), - ReceiverFlavor::List(chan) => chan.capacity(), - ReceiverFlavor::Zero(chan) => chan.capacity(), - ReceiverFlavor::At(chan) => chan.capacity(), - ReceiverFlavor::Tick(chan) => chan.capacity(), - ReceiverFlavor::Never(chan) => chan.capacity(), - } - } - - /// A blocking iterator over messages in the channel. - /// - /// Each call to [`next`] blocks waiting for the next message and then returns it. However, if - /// the channel becomes empty and disconnected, it returns [`None`] without blocking. - /// - /// [`next`]: Iterator::next - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// - /// thread::spawn(move || { - /// s.send(1).unwrap(); - /// s.send(2).unwrap(); - /// s.send(3).unwrap(); - /// drop(s); // Disconnect the channel. - /// }); - /// - /// // Collect all messages from the channel. - /// // Note that the call to `collect` blocks until the sender is dropped. - /// let v: Vec<_> = r.iter().collect(); - /// - /// assert_eq!(v, [1, 2, 3]); - /// ``` - pub fn iter(&self) -> Iter<'_, T> { - Iter { receiver: self } - } - - /// A non-blocking iterator over messages in the channel. - /// - /// Each call to [`next`] returns a message if there is one ready to be received. The iterator - /// never blocks waiting for the next message. 
- /// - /// [`next`]: Iterator::next - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded::(); - /// - /// # let t = - /// thread::spawn(move || { - /// s.send(1).unwrap(); - /// thread::sleep(Duration::from_secs(1)); - /// s.send(2).unwrap(); - /// thread::sleep(Duration::from_secs(2)); - /// s.send(3).unwrap(); - /// }); - /// - /// thread::sleep(Duration::from_secs(2)); - /// - /// // Collect all messages from the channel without blocking. - /// // The third message hasn't been sent yet so we'll collect only the first two. - /// let v: Vec<_> = r.try_iter().collect(); - /// - /// assert_eq!(v, [1, 2]); - /// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn try_iter(&self) -> TryIter<'_, T> { - TryIter { receiver: self } - } - - /// Returns `true` if receivers belong to the same channel. - /// - /// # Examples - /// - /// ```rust - /// use crossbeam_channel::unbounded; - /// - /// let (_, r) = unbounded::(); - /// - /// let r2 = r.clone(); - /// assert!(r.same_channel(&r2)); - /// - /// let (_, r3) = unbounded(); - /// assert!(!r.same_channel(&r3)); - /// ``` - pub fn same_channel(&self, other: &Self) -> bool { - match (&self.flavor, &other.flavor) { - (ReceiverFlavor::Array(a), ReceiverFlavor::Array(b)) => a == b, - (ReceiverFlavor::List(a), ReceiverFlavor::List(b)) => a == b, - (ReceiverFlavor::Zero(a), ReceiverFlavor::Zero(b)) => a == b, - (ReceiverFlavor::At(a), ReceiverFlavor::At(b)) => Arc::ptr_eq(a, b), - (ReceiverFlavor::Tick(a), ReceiverFlavor::Tick(b)) => Arc::ptr_eq(a, b), - (ReceiverFlavor::Never(_), ReceiverFlavor::Never(_)) => true, - _ => false, - } - } -} - -impl Drop for Receiver { - fn drop(&mut self) { - unsafe { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.release(|c| c.disconnect()), - ReceiverFlavor::List(chan) => chan.release(|c| 
c.disconnect_receivers()), - ReceiverFlavor::Zero(chan) => chan.release(|c| c.disconnect()), - ReceiverFlavor::At(_) => {} - ReceiverFlavor::Tick(_) => {} - ReceiverFlavor::Never(_) => {} - } - } - } -} - -impl Clone for Receiver { - fn clone(&self) -> Self { - let flavor = match &self.flavor { - ReceiverFlavor::Array(chan) => ReceiverFlavor::Array(chan.acquire()), - ReceiverFlavor::List(chan) => ReceiverFlavor::List(chan.acquire()), - ReceiverFlavor::Zero(chan) => ReceiverFlavor::Zero(chan.acquire()), - ReceiverFlavor::At(chan) => ReceiverFlavor::At(chan.clone()), - ReceiverFlavor::Tick(chan) => ReceiverFlavor::Tick(chan.clone()), - ReceiverFlavor::Never(_) => ReceiverFlavor::Never(flavors::never::Channel::new()), - }; - - Self { flavor } - } -} - -impl fmt::Debug for Receiver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Receiver { .. }") - } -} - -impl<'a, T> IntoIterator for &'a Receiver { - type Item = T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for Receiver { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { receiver: self } - } -} - -/// A blocking iterator over messages in a channel. -/// -/// Each call to [`next`] blocks waiting for the next message and then returns it. However, if the -/// channel becomes empty and disconnected, it returns [`None`] without blocking. -/// -/// [`next`]: Iterator::next -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// thread::spawn(move || { -/// s.send(1).unwrap(); -/// s.send(2).unwrap(); -/// s.send(3).unwrap(); -/// drop(s); // Disconnect the channel. -/// }); -/// -/// // Collect all messages from the channel. -/// // Note that the call to `collect` blocks until the sender is dropped. 
-/// let v: Vec<_> = r.iter().collect(); -/// -/// assert_eq!(v, [1, 2, 3]); -/// ``` -pub struct Iter<'a, T> { - receiver: &'a Receiver, -} - -impl FusedIterator for Iter<'_, T> {} - -impl Iterator for Iter<'_, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.receiver.recv().ok() - } -} - -impl fmt::Debug for Iter<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Iter { .. }") - } -} - -/// A non-blocking iterator over messages in a channel. -/// -/// Each call to [`next`] returns a message if there is one ready to be received. The iterator -/// never blocks waiting for the next message. -/// -/// [`next`]: Iterator::next -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded::(); -/// -/// # let t = -/// thread::spawn(move || { -/// s.send(1).unwrap(); -/// thread::sleep(Duration::from_secs(1)); -/// s.send(2).unwrap(); -/// thread::sleep(Duration::from_secs(2)); -/// s.send(3).unwrap(); -/// }); -/// -/// thread::sleep(Duration::from_secs(2)); -/// -/// // Collect all messages from the channel without blocking. -/// // The third message hasn't been sent yet so we'll collect only the first two. -/// let v: Vec<_> = r.try_iter().collect(); -/// -/// assert_eq!(v, [1, 2]); -/// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// ``` -pub struct TryIter<'a, T> { - receiver: &'a Receiver, -} - -impl Iterator for TryIter<'_, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.receiver.try_recv().ok() - } -} - -impl fmt::Debug for TryIter<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("TryIter { .. }") - } -} - -/// A blocking iterator over messages in a channel. -/// -/// Each call to [`next`] blocks waiting for the next message and then returns it. 
However, if the -/// channel becomes empty and disconnected, it returns [`None`] without blocking. -/// -/// [`next`]: Iterator::next -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use crossbeam_channel::unbounded; -/// -/// let (s, r) = unbounded(); -/// -/// thread::spawn(move || { -/// s.send(1).unwrap(); -/// s.send(2).unwrap(); -/// s.send(3).unwrap(); -/// drop(s); // Disconnect the channel. -/// }); -/// -/// // Collect all messages from the channel. -/// // Note that the call to `collect` blocks until the sender is dropped. -/// let v: Vec<_> = r.into_iter().collect(); -/// -/// assert_eq!(v, [1, 2, 3]); -/// ``` -pub struct IntoIter { - receiver: Receiver, -} - -impl FusedIterator for IntoIter {} - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - self.receiver.recv().ok() - } -} - -impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("IntoIter { .. }") - } -} - -impl SelectHandle for Sender { - fn try_select(&self, token: &mut Token) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().try_select(token), - SenderFlavor::List(chan) => chan.sender().try_select(token), - SenderFlavor::Zero(chan) => chan.sender().try_select(token), - } - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().register(oper, cx), - SenderFlavor::List(chan) => chan.sender().register(oper, cx), - SenderFlavor::Zero(chan) => chan.sender().register(oper, cx), - } - } - - fn unregister(&self, oper: Operation) { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().unregister(oper), - SenderFlavor::List(chan) => chan.sender().unregister(oper), - SenderFlavor::Zero(chan) => chan.sender().unregister(oper), - } - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => 
chan.sender().accept(token, cx), - SenderFlavor::List(chan) => chan.sender().accept(token, cx), - SenderFlavor::Zero(chan) => chan.sender().accept(token, cx), - } - } - - fn is_ready(&self) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().is_ready(), - SenderFlavor::List(chan) => chan.sender().is_ready(), - SenderFlavor::Zero(chan) => chan.sender().is_ready(), - } - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().watch(oper, cx), - SenderFlavor::List(chan) => chan.sender().watch(oper, cx), - SenderFlavor::Zero(chan) => chan.sender().watch(oper, cx), - } - } - - fn unwatch(&self, oper: Operation) { - match &self.flavor { - SenderFlavor::Array(chan) => chan.sender().unwatch(oper), - SenderFlavor::List(chan) => chan.sender().unwatch(oper), - SenderFlavor::Zero(chan) => chan.sender().unwatch(oper), - } - } -} - -impl SelectHandle for Receiver { - fn try_select(&self, token: &mut Token) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().try_select(token), - ReceiverFlavor::List(chan) => chan.receiver().try_select(token), - ReceiverFlavor::Zero(chan) => chan.receiver().try_select(token), - ReceiverFlavor::At(chan) => chan.try_select(token), - ReceiverFlavor::Tick(chan) => chan.try_select(token), - ReceiverFlavor::Never(chan) => chan.try_select(token), - } - } - - fn deadline(&self) -> Option { - match &self.flavor { - ReceiverFlavor::Array(_) => None, - ReceiverFlavor::List(_) => None, - ReceiverFlavor::Zero(_) => None, - ReceiverFlavor::At(chan) => chan.deadline(), - ReceiverFlavor::Tick(chan) => chan.deadline(), - ReceiverFlavor::Never(chan) => chan.deadline(), - } - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().register(oper, cx), - ReceiverFlavor::List(chan) => chan.receiver().register(oper, cx), - ReceiverFlavor::Zero(chan) => 
chan.receiver().register(oper, cx), - ReceiverFlavor::At(chan) => chan.register(oper, cx), - ReceiverFlavor::Tick(chan) => chan.register(oper, cx), - ReceiverFlavor::Never(chan) => chan.register(oper, cx), - } - } - - fn unregister(&self, oper: Operation) { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().unregister(oper), - ReceiverFlavor::List(chan) => chan.receiver().unregister(oper), - ReceiverFlavor::Zero(chan) => chan.receiver().unregister(oper), - ReceiverFlavor::At(chan) => chan.unregister(oper), - ReceiverFlavor::Tick(chan) => chan.unregister(oper), - ReceiverFlavor::Never(chan) => chan.unregister(oper), - } - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().accept(token, cx), - ReceiverFlavor::List(chan) => chan.receiver().accept(token, cx), - ReceiverFlavor::Zero(chan) => chan.receiver().accept(token, cx), - ReceiverFlavor::At(chan) => chan.accept(token, cx), - ReceiverFlavor::Tick(chan) => chan.accept(token, cx), - ReceiverFlavor::Never(chan) => chan.accept(token, cx), - } - } - - fn is_ready(&self) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().is_ready(), - ReceiverFlavor::List(chan) => chan.receiver().is_ready(), - ReceiverFlavor::Zero(chan) => chan.receiver().is_ready(), - ReceiverFlavor::At(chan) => chan.is_ready(), - ReceiverFlavor::Tick(chan) => chan.is_ready(), - ReceiverFlavor::Never(chan) => chan.is_ready(), - } - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().watch(oper, cx), - ReceiverFlavor::List(chan) => chan.receiver().watch(oper, cx), - ReceiverFlavor::Zero(chan) => chan.receiver().watch(oper, cx), - ReceiverFlavor::At(chan) => chan.watch(oper, cx), - ReceiverFlavor::Tick(chan) => chan.watch(oper, cx), - ReceiverFlavor::Never(chan) => chan.watch(oper, cx), - } - } - - fn unwatch(&self, oper: Operation) { - 
match &self.flavor { - ReceiverFlavor::Array(chan) => chan.receiver().unwatch(oper), - ReceiverFlavor::List(chan) => chan.receiver().unwatch(oper), - ReceiverFlavor::Zero(chan) => chan.receiver().unwatch(oper), - ReceiverFlavor::At(chan) => chan.unwatch(oper), - ReceiverFlavor::Tick(chan) => chan.unwatch(oper), - ReceiverFlavor::Never(chan) => chan.unwatch(oper), - } - } -} - -/// Writes a message into the channel. -pub(crate) unsafe fn write(s: &Sender, token: &mut Token, msg: T) -> Result<(), T> { - unsafe { - match &s.flavor { - SenderFlavor::Array(chan) => chan.write(token, msg), - SenderFlavor::List(chan) => chan.write(token, msg), - SenderFlavor::Zero(chan) => chan.write(token, msg), - } - } -} - -/// Reads a message from the channel. -pub(crate) unsafe fn read(r: &Receiver, token: &mut Token) -> Result { - unsafe { - match &r.flavor { - ReceiverFlavor::Array(chan) => chan.read(token), - ReceiverFlavor::List(chan) => chan.read(token), - ReceiverFlavor::Zero(chan) => chan.read(token), - ReceiverFlavor::At(chan) => { - mem::transmute_copy::, Result>(&chan.read(token)) - } - ReceiverFlavor::Tick(chan) => { - mem::transmute_copy::, Result>(&chan.read(token)) - } - ReceiverFlavor::Never(chan) => chan.read(token), - } - } -} diff --git a/crossbeam-channel/src/context.rs b/crossbeam-channel/src/context.rs deleted file mode 100644 index 13046ed85..000000000 --- a/crossbeam-channel/src/context.rs +++ /dev/null @@ -1,178 +0,0 @@ -//! Thread-local context used in select. - -use std::cell::Cell; -use std::ptr; -use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use std::sync::Arc; -use std::thread::{self, Thread, ThreadId}; -use std::time::Instant; - -use crossbeam_utils::Backoff; - -use crate::select::Selected; - -/// Thread-local context used in select. -// This is a private API that is used by the select macro. -#[derive(Debug, Clone)] -pub struct Context { - inner: Arc, -} - -/// Inner representation of `Context`. 
-#[derive(Debug)] -struct Inner { - /// Selected operation. - select: AtomicUsize, - - /// A slot into which another thread may store a pointer to its `Packet`. - packet: AtomicPtr<()>, - - /// Thread handle. - thread: Thread, - - /// Thread id. - thread_id: ThreadId, -} - -impl Context { - /// Creates a new context for the duration of the closure. - #[inline] - pub fn with(f: F) -> R - where - F: FnOnce(&Self) -> R, - { - std::thread_local! { - /// Cached thread-local context. - static CONTEXT: Cell> = Cell::new(Some(Context::new())); - } - - let mut f = Some(f); - let mut f = |cx: &Self| -> R { - let f = f.take().unwrap(); - f(cx) - }; - - CONTEXT - .try_with(|cell| match cell.take() { - None => f(&Self::new()), - Some(cx) => { - cx.reset(); - let res = f(&cx); - cell.set(Some(cx)); - res - } - }) - .unwrap_or_else(|_| f(&Self::new())) - } - - /// Creates a new `Context`. - #[cold] - fn new() -> Self { - Self { - inner: Arc::new(Inner { - select: AtomicUsize::new(Selected::Waiting.into()), - packet: AtomicPtr::new(ptr::null_mut()), - thread: thread::current(), - thread_id: thread::current().id(), - }), - } - } - - /// Resets `select` and `packet`. - #[inline] - fn reset(&self) { - self.inner - .select - .store(Selected::Waiting.into(), Ordering::Release); - self.inner.packet.store(ptr::null_mut(), Ordering::Release); - } - - /// Attempts to select an operation. - /// - /// On failure, the previously selected operation is returned. - #[inline] - pub fn try_select(&self, select: Selected) -> Result<(), Selected> { - self.inner - .select - .compare_exchange( - Selected::Waiting.into(), - select.into(), - Ordering::AcqRel, - Ordering::Acquire, - ) - .map(|_| ()) - .map_err(|e| e.into()) - } - - /// Returns the selected operation. - #[inline] - pub fn selected(&self) -> Selected { - Selected::from(self.inner.select.load(Ordering::Acquire)) - } - - /// Stores a packet. - /// - /// This method must be called after `try_select` succeeds and there is a packet to provide. 
- #[inline] - pub fn store_packet(&self, packet: *mut ()) { - if !packet.is_null() { - self.inner.packet.store(packet, Ordering::Release); - } - } - - /// Waits until a packet is provided and returns it. - #[inline] - pub fn wait_packet(&self) -> *mut () { - let backoff = Backoff::new(); - loop { - let packet = self.inner.packet.load(Ordering::Acquire); - if !packet.is_null() { - return packet; - } - backoff.snooze(); - } - } - - /// Waits until an operation is selected and returns it. - /// - /// If the deadline is reached, `Selected::Aborted` will be selected. - #[inline] - pub fn wait_until(&self, deadline: Option) -> Selected { - loop { - // Check whether an operation has been selected. - let sel = Selected::from(self.inner.select.load(Ordering::Acquire)); - if sel != Selected::Waiting { - return sel; - } - - // If there's a deadline, park the current thread until the deadline is reached. - if let Some(end) = deadline { - let now = Instant::now(); - - if now < end { - thread::park_timeout(end - now); - } else { - // The deadline has been reached. Try aborting select. - return match self.try_select(Selected::Aborted) { - Ok(()) => Selected::Aborted, - Err(s) => s, - }; - } - } else { - thread::park(); - } - } - } - - /// Unparks the thread this context belongs to. - #[inline] - pub fn unpark(&self) { - self.inner.thread.unpark(); - } - - /// Returns the id of the thread this context belongs to. - #[inline] - pub fn thread_id(&self) -> ThreadId { - self.inner.thread_id - } -} diff --git a/crossbeam-channel/src/counter.rs b/crossbeam-channel/src/counter.rs deleted file mode 100644 index 622d513b7..000000000 --- a/crossbeam-channel/src/counter.rs +++ /dev/null @@ -1,145 +0,0 @@ -//! Reference counter for channels. - -use std::boxed::Box; -use std::ops; -use std::process; -use std::ptr::NonNull; -use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; - -/// Reference counter internals. -struct Counter { - /// The number of senders associated with the channel. 
- senders: AtomicUsize, - - /// The number of receivers associated with the channel. - receivers: AtomicUsize, - - /// Set to `true` if the last sender or the last receiver reference deallocates the channel. - destroy: AtomicBool, - - /// The internal channel. - chan: C, -} - -/// Wraps a channel into the reference counter. -pub(crate) fn new(chan: C) -> (Sender, Receiver) { - let counter = NonNull::from(Box::leak(Box::new(Counter { - senders: AtomicUsize::new(1), - receivers: AtomicUsize::new(1), - destroy: AtomicBool::new(false), - chan, - }))); - let s = Sender { counter }; - let r = Receiver { counter }; - (s, r) -} - -/// The sending side. -pub(crate) struct Sender { - counter: NonNull>, -} - -impl Sender { - /// Returns the internal `Counter`. - fn counter(&self) -> &Counter { - unsafe { self.counter.as_ref() } - } - - /// Acquires another sender reference. - pub(crate) fn acquire(&self) -> Self { - let count = self.counter().senders.fetch_add(1, Ordering::Relaxed); - - // Cloning senders and calling `mem::forget` on the clones could potentially overflow the - // counter. It's very difficult to recover sensibly from such degenerate scenarios so we - // just abort when the count becomes very large. - if count > isize::MAX as usize { - process::abort(); - } - - Self { - counter: self.counter, - } - } - - /// Releases the sender reference. - /// - /// Function `disconnect` will be called if this is the last sender reference. - pub(crate) unsafe fn release bool>(&self, disconnect: F) { - if self.counter().senders.fetch_sub(1, Ordering::AcqRel) == 1 { - disconnect(&self.counter().chan); - - if self.counter().destroy.swap(true, Ordering::AcqRel) { - drop(unsafe { Box::from_raw(self.counter.as_ptr()) }); - } - } - } -} - -impl ops::Deref for Sender { - type Target = C; - - fn deref(&self) -> &C { - &self.counter().chan - } -} - -impl PartialEq for Sender { - fn eq(&self, other: &Self) -> bool { - self.counter == other.counter - } -} - -/// The receiving side. 
-pub(crate) struct Receiver { - counter: NonNull>, -} - -impl Receiver { - /// Returns the internal `Counter`. - fn counter(&self) -> &Counter { - unsafe { self.counter.as_ref() } - } - - /// Acquires another receiver reference. - pub(crate) fn acquire(&self) -> Self { - let count = self.counter().receivers.fetch_add(1, Ordering::Relaxed); - - // Cloning receivers and calling `mem::forget` on the clones could potentially overflow the - // counter. It's very difficult to recover sensibly from such degenerate scenarios so we - // just abort when the count becomes very large. - if count > isize::MAX as usize { - process::abort(); - } - - Self { - counter: self.counter, - } - } - - /// Releases the receiver reference. - /// - /// Function `disconnect` will be called if this is the last receiver reference. - pub(crate) unsafe fn release bool>(&self, disconnect: F) { - if self.counter().receivers.fetch_sub(1, Ordering::AcqRel) == 1 { - disconnect(&self.counter().chan); - - if self.counter().destroy.swap(true, Ordering::AcqRel) { - drop(unsafe { Box::from_raw(self.counter.as_ptr()) }); - } - } - } -} - -impl ops::Deref for Receiver { - type Target = C; - - fn deref(&self) -> &C { - &self.counter().chan - } -} - -impl PartialEq for Receiver { - fn eq(&self, other: &Self) -> bool { - self.counter == other.counter - } -} diff --git a/crossbeam-channel/src/err.rs b/crossbeam-channel/src/err.rs deleted file mode 100644 index 7306c46d5..000000000 --- a/crossbeam-channel/src/err.rs +++ /dev/null @@ -1,354 +0,0 @@ -use std::error; -use std::fmt; - -/// An error returned from the [`send`] method. -/// -/// The message could not be sent because the channel is disconnected. -/// -/// The error contains the message so it can be recovered. -/// -/// [`send`]: super::Sender::send -#[derive(PartialEq, Eq, Clone, Copy)] -pub struct SendError(pub T); - -/// An error returned from the [`try_send`] method. -/// -/// The error contains the message being sent so it can be recovered. 
-/// -/// [`try_send`]: super::Sender::try_send -#[derive(PartialEq, Eq, Clone, Copy)] -pub enum TrySendError { - /// The message could not be sent because the channel is full. - /// - /// If this is a zero-capacity channel, then the error indicates that there was no receiver - /// available to receive the message at the time. - Full(T), - - /// The message could not be sent because the channel is disconnected. - Disconnected(T), -} - -/// An error returned from the [`send_timeout`] method. -/// -/// The error contains the message being sent so it can be recovered. -/// -/// [`send_timeout`]: super::Sender::send_timeout -#[derive(PartialEq, Eq, Clone, Copy)] -pub enum SendTimeoutError { - /// The message could not be sent because the channel is full and the operation timed out. - /// - /// If this is a zero-capacity channel, then the error indicates that there was no receiver - /// available to receive the message and the operation timed out. - Timeout(T), - - /// The message could not be sent because the channel is disconnected. - Disconnected(T), -} - -/// An error returned from the [`recv`] method. -/// -/// A message could not be received because the channel is empty and disconnected. -/// -/// [`recv`]: super::Receiver::recv -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct RecvError; - -/// An error returned from the [`try_recv`] method. -/// -/// [`try_recv`]: super::Receiver::try_recv -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum TryRecvError { - /// A message could not be received because the channel is empty. - /// - /// If this is a zero-capacity channel, then the error indicates that there was no sender - /// available to send a message at the time. - Empty, - - /// The message could not be received because the channel is empty and disconnected. - Disconnected, -} - -/// An error returned from the [`recv_timeout`] method. 
-/// -/// [`recv_timeout`]: super::Receiver::recv_timeout -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub enum RecvTimeoutError { - /// A message could not be received because the channel is empty and the operation timed out. - /// - /// If this is a zero-capacity channel, then the error indicates that there was no sender - /// available to send a message and the operation timed out. - Timeout, - - /// The message could not be received because the channel is empty and disconnected. - Disconnected, -} - -/// An error returned from the [`try_select`] method. -/// -/// Failed because none of the channel operations were ready. -/// -/// [`try_select`]: super::Select::try_select -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct TrySelectError; - -/// An error returned from the [`select_timeout`] method. -/// -/// Failed because none of the channel operations became ready before the timeout. -/// -/// [`select_timeout`]: super::Select::select_timeout -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct SelectTimeoutError; - -/// An error returned from the [`try_ready`] method. -/// -/// Failed because none of the channel operations were ready. -/// -/// [`try_ready`]: super::Select::try_ready -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct TryReadyError; - -/// An error returned from the [`ready_timeout`] method. -/// -/// Failed because none of the channel operations became ready before the timeout. -/// -/// [`ready_timeout`]: super::Select::ready_timeout -#[derive(PartialEq, Eq, Clone, Copy, Debug)] -pub struct ReadyTimeoutError; - -impl fmt::Debug for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - "SendError(..)".fmt(f) - } -} - -impl fmt::Display for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - "sending on a disconnected channel".fmt(f) - } -} - -impl error::Error for SendError {} - -impl SendError { - /// Unwraps the message. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// drop(r); - /// - /// if let Err(err) = s.send("foo") { - /// assert_eq!(err.into_inner(), "foo"); - /// } - /// ``` - pub fn into_inner(self) -> T { - self.0 - } -} - -impl fmt::Debug for TrySendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Self::Full(..) => "Full(..)".fmt(f), - Self::Disconnected(..) => "Disconnected(..)".fmt(f), - } - } -} - -impl fmt::Display for TrySendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Self::Full(..) => "sending on a full channel".fmt(f), - Self::Disconnected(..) => "sending on a disconnected channel".fmt(f), - } - } -} - -impl error::Error for TrySendError {} - -impl From> for TrySendError { - fn from(err: SendError) -> Self { - match err { - SendError(t) => Self::Disconnected(t), - } - } -} - -impl TrySendError { - /// Unwraps the message. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::bounded; - /// - /// let (s, r) = bounded(0); - /// - /// if let Err(err) = s.try_send("foo") { - /// assert_eq!(err.into_inner(), "foo"); - /// } - /// ``` - pub fn into_inner(self) -> T { - match self { - Self::Full(v) => v, - Self::Disconnected(v) => v, - } - } - - /// Returns `true` if the send operation failed because the channel is full. - pub fn is_full(&self) -> bool { - matches!(self, Self::Full(_)) - } - - /// Returns `true` if the send operation failed because the channel is disconnected. - pub fn is_disconnected(&self) -> bool { - matches!(self, Self::Disconnected(_)) - } -} - -impl fmt::Debug for SendTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - "SendTimeoutError(..)".fmt(f) - } -} - -impl fmt::Display for SendTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Self::Timeout(..) 
=> "timed out waiting on send operation".fmt(f), - Self::Disconnected(..) => "sending on a disconnected channel".fmt(f), - } - } -} - -impl error::Error for SendTimeoutError {} - -impl From> for SendTimeoutError { - fn from(err: SendError) -> Self { - match err { - SendError(e) => Self::Disconnected(e), - } - } -} - -impl SendTimeoutError { - /// Unwraps the message. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use crossbeam_channel::unbounded; - /// - /// let (s, r) = unbounded(); - /// - /// if let Err(err) = s.send_timeout("foo", Duration::from_secs(1)) { - /// assert_eq!(err.into_inner(), "foo"); - /// } - /// ``` - pub fn into_inner(self) -> T { - match self { - Self::Timeout(v) => v, - Self::Disconnected(v) => v, - } - } - - /// Returns `true` if the send operation timed out. - pub fn is_timeout(&self) -> bool { - matches!(self, Self::Timeout(_)) - } - - /// Returns `true` if the send operation failed because the channel is disconnected. - pub fn is_disconnected(&self) -> bool { - matches!(self, Self::Disconnected(_)) - } -} - -impl fmt::Display for RecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - "receiving on an empty and disconnected channel".fmt(f) - } -} - -impl error::Error for RecvError {} - -impl fmt::Display for TryRecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Self::Empty => "receiving on an empty channel".fmt(f), - Self::Disconnected => "receiving on an empty and disconnected channel".fmt(f), - } - } -} - -impl error::Error for TryRecvError {} - -impl From for TryRecvError { - fn from(err: RecvError) -> Self { - match err { - RecvError => Self::Disconnected, - } - } -} - -impl TryRecvError { - /// Returns `true` if the receive operation failed because the channel is empty. - pub fn is_empty(&self) -> bool { - matches!(self, Self::Empty) - } - - /// Returns `true` if the receive operation failed because the channel is disconnected. 
- pub fn is_disconnected(&self) -> bool { - matches!(self, Self::Disconnected) - } -} - -impl fmt::Display for RecvTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - Self::Timeout => "timed out waiting on receive operation".fmt(f), - Self::Disconnected => "channel is empty and disconnected".fmt(f), - } - } -} - -impl error::Error for RecvTimeoutError {} - -impl From for RecvTimeoutError { - fn from(err: RecvError) -> Self { - match err { - RecvError => Self::Disconnected, - } - } -} - -impl RecvTimeoutError { - /// Returns `true` if the receive operation timed out. - pub fn is_timeout(&self) -> bool { - matches!(self, Self::Timeout) - } - - /// Returns `true` if the receive operation failed because the channel is disconnected. - pub fn is_disconnected(&self) -> bool { - matches!(self, Self::Disconnected) - } -} - -impl fmt::Display for TrySelectError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - "all operations in select would block".fmt(f) - } -} - -impl error::Error for TrySelectError {} - -impl fmt::Display for SelectTimeoutError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - "timed out waiting on select".fmt(f) - } -} - -impl error::Error for SelectTimeoutError {} diff --git a/crossbeam-channel/src/flavors/array.rs b/crossbeam-channel/src/flavors/array.rs deleted file mode 100644 index 206a05a86..000000000 --- a/crossbeam-channel/src/flavors/array.rs +++ /dev/null @@ -1,639 +0,0 @@ -//! Bounded channel based on a preallocated array. -//! -//! This flavor has a fixed, positive capacity. -//! -//! The implementation is based on Dmitry Vyukov's bounded MPMC queue. -//! -//! Source: -//! - -//! 
- - -use std::boxed::Box; -use std::cell::UnsafeCell; -use std::mem::{self, MaybeUninit}; -use std::ptr; -use std::sync::atomic::{self, AtomicUsize, Ordering}; -use std::time::Instant; - -use crossbeam_utils::{Backoff, CachePadded}; - -use crate::context::Context; -use crate::err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; -use crate::select::{Operation, SelectHandle, Selected, Token}; -use crate::waker::SyncWaker; - -/// A slot in a channel. -struct Slot { - /// The current stamp. - stamp: AtomicUsize, - - /// The message in this slot. - msg: UnsafeCell>, -} - -/// The token type for the array flavor. -#[derive(Debug)] -pub(crate) struct ArrayToken { - /// Slot to read from or write to. - slot: *const u8, - - /// Stamp to store into the slot after reading or writing. - stamp: usize, -} - -impl Default for ArrayToken { - #[inline] - fn default() -> Self { - Self { - slot: ptr::null(), - stamp: 0, - } - } -} - -/// Bounded channel based on a preallocated array. -pub(crate) struct Channel { - /// The head of the channel. - /// - /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but - /// packed into a single `usize`. The lower bits represent the index, while the upper bits - /// represent the lap. The mark bit in the head is always zero. - /// - /// Messages are popped from the head of the channel. - head: CachePadded, - - /// The tail of the channel. - /// - /// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but - /// packed into a single `usize`. The lower bits represent the index, while the upper bits - /// represent the lap. The mark bit indicates that the channel is disconnected. - /// - /// Messages are pushed into the tail of the channel. - tail: CachePadded, - - /// The buffer holding slots. - buffer: Box<[Slot]>, - - /// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`. 
- one_lap: usize, - - /// If this bit is set in the tail, that means the channel is disconnected. - mark_bit: usize, - - /// Senders waiting while the channel is full. - senders: SyncWaker, - - /// Receivers waiting while the channel is empty and not disconnected. - receivers: SyncWaker, -} - -impl Channel { - /// Creates a bounded channel of capacity `cap`. - pub(crate) fn with_capacity(cap: usize) -> Self { - assert!(cap > 0, "capacity must be positive"); - - // Compute constants `mark_bit` and `one_lap`. - let mark_bit = (cap + 1).next_power_of_two(); - let one_lap = mark_bit * 2; - - // Head is initialized to `{ lap: 0, mark: 0, index: 0 }`. - let head = 0; - // Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`. - let tail = 0; - - // Allocate a buffer of `cap` slots initialized - // with stamps. - let buffer: Box<[Slot]> = (0..cap) - .map(|i| { - // Set the stamp to `{ lap: 0, mark: 0, index: i }`. - Slot { - stamp: AtomicUsize::new(i), - msg: UnsafeCell::new(MaybeUninit::uninit()), - } - }) - .collect(); - - Self { - buffer, - one_lap, - mark_bit, - head: CachePadded::new(AtomicUsize::new(head)), - tail: CachePadded::new(AtomicUsize::new(tail)), - senders: SyncWaker::new(), - receivers: SyncWaker::new(), - } - } - - /// Returns a receiver handle to the channel. - pub(crate) fn receiver(&self) -> Receiver<'_, T> { - Receiver(self) - } - - /// Returns a sender handle to the channel. - pub(crate) fn sender(&self) -> Sender<'_, T> { - Sender(self) - } - - /// Attempts to reserve a slot for sending a message. - fn start_send(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut tail = self.tail.load(Ordering::Relaxed); - - loop { - // Check if the channel is disconnected. - if tail & self.mark_bit != 0 { - token.array.slot = ptr::null(); - token.array.stamp = 0; - return true; - } - - // Deconstruct the tail. - let index = tail & (self.mark_bit - 1); - let lap = tail & !(self.one_lap - 1); - - // Inspect the corresponding slot. 
- debug_assert!(index < self.buffer.len()); - let slot = unsafe { self.buffer.get_unchecked(index) }; - let stamp = slot.stamp.load(Ordering::Acquire); - - // If the tail and the stamp match, we may attempt to push. - if tail == stamp { - let new_tail = if index + 1 < self.cap() { - // Same lap, incremented index. - // Set to `{ lap: lap, mark: 0, index: index + 1 }`. - tail + 1 - } else { - // One lap forward, index wraps around to zero. - // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. - lap.wrapping_add(self.one_lap) - }; - - // Try moving the tail. - match self.tail.compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Relaxed, - ) { - Ok(_) => { - // Prepare the token for the follow-up call to `write`. - token.array.slot = slot as *const Slot as *const u8; - token.array.stamp = tail + 1; - return true; - } - Err(t) => { - tail = t; - backoff.spin(); - } - } - } else if stamp.wrapping_add(self.one_lap) == tail + 1 { - atomic::fence(Ordering::SeqCst); - let head = self.head.load(Ordering::Relaxed); - - // If the head lags one lap behind the tail as well... - if head.wrapping_add(self.one_lap) == tail { - // ...then the channel is full. - return false; - } - - backoff.spin(); - tail = self.tail.load(Ordering::Relaxed); - } else { - // Snooze because we need to wait for the stamp to get updated. - backoff.snooze(); - tail = self.tail.load(Ordering::Relaxed); - } - } - } - - /// Writes a message into the channel. - pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { - // If there is no slot, the channel is disconnected. - if token.array.slot.is_null() { - return Err(msg); - } - - let slot: &Slot = unsafe { &*token.array.slot.cast::>() }; - - // Write the message into the slot and update the stamp. - unsafe { slot.msg.get().write(MaybeUninit::new(msg)) } - slot.stamp.store(token.array.stamp, Ordering::Release); - - // Wake a sleeping receiver. 
- self.receivers.notify(); - Ok(()) - } - - /// Attempts to reserve a slot for receiving a message. - fn start_recv(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut head = self.head.load(Ordering::Relaxed); - - loop { - // Deconstruct the head. - let index = head & (self.mark_bit - 1); - let lap = head & !(self.one_lap - 1); - - // Inspect the corresponding slot. - debug_assert!(index < self.buffer.len()); - let slot = unsafe { self.buffer.get_unchecked(index) }; - let stamp = slot.stamp.load(Ordering::Acquire); - - // If the stamp is ahead of the head by 1, we may attempt to pop. - if head + 1 == stamp { - let new = if index + 1 < self.cap() { - // Same lap, incremented index. - // Set to `{ lap: lap, mark: 0, index: index + 1 }`. - head + 1 - } else { - // One lap forward, index wraps around to zero. - // Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`. - lap.wrapping_add(self.one_lap) - }; - - // Try moving the head. - match self.head.compare_exchange_weak( - head, - new, - Ordering::SeqCst, - Ordering::Relaxed, - ) { - Ok(_) => { - // Prepare the token for the follow-up call to `read`. - token.array.slot = slot as *const Slot as *const u8; - token.array.stamp = head.wrapping_add(self.one_lap); - return true; - } - Err(h) => { - head = h; - backoff.spin(); - } - } - } else if stamp == head { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.load(Ordering::Relaxed); - - // If the tail equals the head, that means the channel is empty. - if (tail & !self.mark_bit) == head { - // If the channel is disconnected... - if tail & self.mark_bit != 0 { - // ...then receive an error. - token.array.slot = ptr::null(); - token.array.stamp = 0; - return true; - } else { - // Otherwise, the receive operation is not ready. - return false; - } - } - - backoff.spin(); - head = self.head.load(Ordering::Relaxed); - } else { - // Snooze because we need to wait for the stamp to get updated. 
- backoff.snooze(); - head = self.head.load(Ordering::Relaxed); - } - } - } - - /// Reads a message from the channel. - pub(crate) unsafe fn read(&self, token: &mut Token) -> Result { - if token.array.slot.is_null() { - // The channel is disconnected. - return Err(()); - } - - let slot: &Slot = unsafe { &*token.array.slot.cast::>() }; - - // Read the message from the slot and update the stamp. - let msg = unsafe { slot.msg.get().read().assume_init() }; - slot.stamp.store(token.array.stamp, Ordering::Release); - - // Wake a sleeping sender. - self.senders.notify(); - Ok(msg) - } - - /// Attempts to send a message into the channel. - pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError> { - let token = &mut Token::default(); - if self.start_send(token) { - unsafe { self.write(token, msg).map_err(TrySendError::Disconnected) } - } else { - Err(TrySendError::Full(msg)) - } - } - - /// Sends a message into the channel. - pub(crate) fn send( - &self, - msg: T, - deadline: Option, - ) -> Result<(), SendTimeoutError> { - let token = &mut Token::default(); - loop { - // Try sending a message several times. - let backoff = Backoff::new(); - loop { - if self.start_send(token) { - let res = unsafe { self.write(token, msg) }; - return res.map_err(SendTimeoutError::Disconnected); - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - if let Some(d) = deadline { - if Instant::now() >= d { - return Err(SendTimeoutError::Timeout(msg)); - } - } - - Context::with(|cx| { - // Prepare for blocking until a receiver wakes us up. - let oper = Operation::hook(token); - self.senders.register(oper, cx); - - // Has the channel become ready just now? - if !self.is_full() || self.is_disconnected() { - let _ = cx.try_select(Selected::Aborted); - } - - // Block the current thread. 
- let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted | Selected::Disconnected => { - self.senders.unregister(oper).unwrap(); - } - Selected::Operation(_) => {} - } - }); - } - } - - /// Attempts to receive a message without blocking. - pub(crate) fn try_recv(&self) -> Result { - let token = &mut Token::default(); - - if self.start_recv(token) { - unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) } - } else { - Err(TryRecvError::Empty) - } - } - - /// Receives a message from the channel. - pub(crate) fn recv(&self, deadline: Option) -> Result { - let token = &mut Token::default(); - loop { - // Try receiving a message several times. - let backoff = Backoff::new(); - loop { - if self.start_recv(token) { - let res = unsafe { self.read(token) }; - return res.map_err(|_| RecvTimeoutError::Disconnected); - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - if let Some(d) = deadline { - if Instant::now() >= d { - return Err(RecvTimeoutError::Timeout); - } - } - - Context::with(|cx| { - // Prepare for blocking until a sender wakes us up. - let oper = Operation::hook(token); - self.receivers.register(oper, cx); - - // Has the channel become ready just now? - if !self.is_empty() || self.is_disconnected() { - let _ = cx.try_select(Selected::Aborted); - } - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted | Selected::Disconnected => { - self.receivers.unregister(oper).unwrap(); - // If the channel was disconnected, we still have to check for remaining - // messages. - } - Selected::Operation(_) => {} - } - }); - } - } - - /// Returns the current number of messages inside the channel. - pub(crate) fn len(&self) -> usize { - loop { - // Load the tail, then load the head. 
- let tail = self.tail.load(Ordering::SeqCst); - let head = self.head.load(Ordering::SeqCst); - - // If the tail didn't change, we've got consistent values to work with. - if self.tail.load(Ordering::SeqCst) == tail { - let hix = head & (self.mark_bit - 1); - let tix = tail & (self.mark_bit - 1); - - return if hix < tix { - tix - hix - } else if hix > tix { - self.cap() - hix + tix - } else if (tail & !self.mark_bit) == head { - 0 - } else { - self.cap() - }; - } - } - } - - /// Returns the capacity of the channel. - #[inline] - fn cap(&self) -> usize { - self.buffer.len() - } - - /// Returns the capacity of the channel. - pub(crate) fn capacity(&self) -> Option { - Some(self.cap()) - } - - /// Disconnects the channel and wakes up all blocked senders and receivers. - /// - /// Returns `true` if this call disconnected the channel. - pub(crate) fn disconnect(&self) -> bool { - let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst); - - if tail & self.mark_bit == 0 { - self.senders.disconnect(); - self.receivers.disconnect(); - true - } else { - false - } - } - - /// Returns `true` if the channel is disconnected. - pub(crate) fn is_disconnected(&self) -> bool { - self.tail.load(Ordering::SeqCst) & self.mark_bit != 0 - } - - /// Returns `true` if the channel is empty. - pub(crate) fn is_empty(&self) -> bool { - let head = self.head.load(Ordering::SeqCst); - let tail = self.tail.load(Ordering::SeqCst); - - // Is the tail equal to the head? - // - // Note: If the head changes just before we load the tail, that means there was a moment - // when the channel was not empty, so it is safe to just return `false`. - (tail & !self.mark_bit) == head - } - - /// Returns `true` if the channel is full. - pub(crate) fn is_full(&self) -> bool { - let tail = self.tail.load(Ordering::SeqCst); - let head = self.head.load(Ordering::SeqCst); - - // Is the head lagging one lap behind tail? 
- // - // Note: If the tail changes just before we load the head, that means there was a moment - // when the channel was not full, so it is safe to just return `false`. - head.wrapping_add(self.one_lap) == tail & !self.mark_bit - } -} - -impl Drop for Channel { - fn drop(&mut self) { - if mem::needs_drop::() { - // Get the index of the head. - let head = *self.head.get_mut(); - let tail = *self.tail.get_mut(); - - let hix = head & (self.mark_bit - 1); - let tix = tail & (self.mark_bit - 1); - - let len = if hix < tix { - tix - hix - } else if hix > tix { - self.cap() - hix + tix - } else if (tail & !self.mark_bit) == head { - 0 - } else { - self.cap() - }; - - // Loop over all slots that hold a message and drop them. - for i in 0..len { - // Compute the index of the next slot holding a message. - let index = if hix + i < self.cap() { - hix + i - } else { - hix + i - self.cap() - }; - - unsafe { - debug_assert!(index < self.buffer.len()); - let slot = self.buffer.get_unchecked_mut(index); - (*slot.msg.get()).assume_init_drop(); - } - } - } - } -} - -/// Receiver handle to a channel. -pub(crate) struct Receiver<'a, T>(&'a Channel); - -/// Sender handle to a channel. 
-pub(crate) struct Sender<'a, T>(&'a Channel); - -impl SelectHandle for Receiver<'_, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_recv(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.register(oper, cx); - self.is_ready() - } - - fn unregister(&self, oper: Operation) { - self.0.receivers.unregister(oper); - } - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - !self.0.is_empty() || self.0.is_disconnected() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.watch(oper, cx); - self.is_ready() - } - - fn unwatch(&self, oper: Operation) { - self.0.receivers.unwatch(oper); - } -} - -impl SelectHandle for Sender<'_, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_send(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - self.0.senders.register(oper, cx); - self.is_ready() - } - - fn unregister(&self, oper: Operation) { - self.0.senders.unregister(oper); - } - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - !self.0.is_full() || self.0.is_disconnected() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - self.0.senders.watch(oper, cx); - self.is_ready() - } - - fn unwatch(&self, oper: Operation) { - self.0.senders.unwatch(oper); - } -} diff --git a/crossbeam-channel/src/flavors/at.rs b/crossbeam-channel/src/flavors/at.rs deleted file mode 100644 index 83e69c1ed..000000000 --- a/crossbeam-channel/src/flavors/at.rs +++ /dev/null @@ -1,193 +0,0 @@ -//! Channel that delivers a message at a certain moment in time. -//! -//! Messages cannot be sent into this kind of channel; they are materialized on demand. 
- -use std::sync::atomic::{AtomicBool, Ordering}; -use std::thread; -use std::time::Instant; - -use crate::context::Context; -use crate::err::{RecvTimeoutError, TryRecvError}; -use crate::select::{Operation, SelectHandle, Token}; -use crate::utils; - -/// Result of a receive operation. -pub(crate) type AtToken = Option; - -/// Channel that delivers a message at a certain moment in time -pub(crate) struct Channel { - /// The instant at which the message will be delivered. - delivery_time: Instant, - - /// `true` if the message has been received. - received: AtomicBool, -} - -impl Channel { - /// Creates a channel that delivers a message at a certain instant in time. - #[inline] - pub(crate) fn new_deadline(when: Instant) -> Self { - Self { - delivery_time: when, - received: AtomicBool::new(false), - } - } - - /// Attempts to receive a message without blocking. - #[inline] - pub(crate) fn try_recv(&self) -> Result { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - // The message has already been received. - return Err(TryRecvError::Empty); - } - - if Instant::now() < self.delivery_time { - // The message was not delivered yet. - return Err(TryRecvError::Empty); - } - - // Try receiving the message if it is still available. - if !self.received.swap(true, Ordering::SeqCst) { - // Success! Return delivery time as the message. - Ok(self.delivery_time) - } else { - // The message was already received. - Err(TryRecvError::Empty) - } - } - - /// Receives a message from the channel. - #[inline] - pub(crate) fn recv(&self, deadline: Option) -> Result { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - // The message has already been received. - utils::sleep_until(deadline); - return Err(RecvTimeoutError::Timeout); - } - - // Wait until the message is received or the deadline is reached. 
- loop { - let now = Instant::now(); - - let deadline = match deadline { - // Check if we can receive the next message. - _ if now >= self.delivery_time => break, - // Check if the timeout deadline has been reached. - Some(d) if now >= d => return Err(RecvTimeoutError::Timeout), - - // Sleep until one of the above happens - Some(d) if d < self.delivery_time => d, - _ => self.delivery_time, - }; - - thread::sleep(deadline - now); - } - - // Try receiving the message if it is still available. - if !self.received.swap(true, Ordering::SeqCst) { - // Success! Return the message, which is the instant at which it was delivered. - Ok(self.delivery_time) - } else { - // The message was already received. Block forever. - utils::sleep_until(None); - unreachable!() - } - } - - /// Reads a message from the channel. - #[inline] - pub(crate) unsafe fn read(&self, token: &mut Token) -> Result { - token.at.ok_or(()) - } - - /// Returns `true` if the channel is empty. - #[inline] - pub(crate) fn is_empty(&self) -> bool { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - return true; - } - - // If the delivery time hasn't been reached yet, the channel is empty. - if Instant::now() < self.delivery_time { - return true; - } - - // The delivery time has been reached. The channel is empty only if the message has already - // been received. - self.received.load(Ordering::SeqCst) - } - - /// Returns `true` if the channel is full. - #[inline] - pub(crate) fn is_full(&self) -> bool { - !self.is_empty() - } - - /// Returns the number of messages in the channel. - #[inline] - pub(crate) fn len(&self) -> usize { - usize::from(!self.is_empty()) - } - - /// Returns the capacity of the channel. 
- #[inline] - pub(crate) fn capacity(&self) -> Option { - Some(1) - } -} - -impl SelectHandle for Channel { - #[inline] - fn try_select(&self, token: &mut Token) -> bool { - match self.try_recv() { - Ok(msg) => { - token.at = Some(msg); - true - } - Err(TryRecvError::Disconnected) => { - token.at = None; - true - } - Err(TryRecvError::Empty) => false, - } - } - - #[inline] - fn deadline(&self) -> Option { - // We use relaxed ordering because this is just an optional optimistic check. - if self.received.load(Ordering::Relaxed) { - None - } else { - Some(self.delivery_time) - } - } - - #[inline] - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unregister(&self, _oper: Operation) {} - - #[inline] - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - #[inline] - fn is_ready(&self) -> bool { - !self.is_empty() - } - - #[inline] - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unwatch(&self, _oper: Operation) {} -} diff --git a/crossbeam-channel/src/flavors/list.rs b/crossbeam-channel/src/flavors/list.rs deleted file mode 100644 index 8ffe58beb..000000000 --- a/crossbeam-channel/src/flavors/list.rs +++ /dev/null @@ -1,775 +0,0 @@ -//! Unbounded channel implemented as a linked list. 
- -use std::alloc::{alloc_zeroed, handle_alloc_error, Layout}; -use std::boxed::Box; -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::mem::MaybeUninit; -use std::ptr; -use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; -use std::time::Instant; - -use crossbeam_utils::{Backoff, CachePadded}; - -use crate::context::Context; -use crate::err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; -use crate::select::{Operation, SelectHandle, Selected, Token}; -use crate::waker::SyncWaker; - -// TODO(stjepang): Once we bump the minimum required Rust version to 1.28 or newer, re-apply the -// following changes by @kleimkuhler: -// -// 1. https://github.com/crossbeam-rs/crossbeam-channel/pull/100 -// 2. https://github.com/crossbeam-rs/crossbeam-channel/pull/101 - -// Bits indicating the state of a slot: -// * If a message has been written into the slot, `WRITE` is set. -// * If a message has been read from the slot, `READ` is set. -// * If the block is being destroyed, `DESTROY` is set. -const WRITE: usize = 1; -const READ: usize = 2; -const DESTROY: usize = 4; - -// Each block covers one "lap" of indices. -const LAP: usize = 32; -// The maximum number of messages a block can hold. -const BLOCK_CAP: usize = LAP - 1; -// How many lower bits are reserved for metadata. -const SHIFT: usize = 1; -// Has two different purposes: -// * If set in head, indicates that the block is not the last one. -// * If set in tail, indicates that the channel is disconnected. -const MARK_BIT: usize = 1; - -/// A slot in a block. -struct Slot { - /// The message. - msg: UnsafeCell>, - - /// The state of the slot. - state: AtomicUsize, -} - -impl Slot { - /// Waits until a message is written into the slot. - fn wait_write(&self) { - let backoff = Backoff::new(); - while self.state.load(Ordering::Acquire) & WRITE == 0 { - backoff.snooze(); - } - } -} - -/// A block in a linked list. -/// -/// Each block in the list can hold up to `BLOCK_CAP` messages. 
-struct Block { - /// The next block in the linked list. - next: AtomicPtr>, - - /// Slots for messages. - slots: [Slot; BLOCK_CAP], -} - -impl Block { - const LAYOUT: Layout = { - let layout = Layout::new::(); - assert!( - layout.size() != 0, - "Block should never be zero-sized, as it has an AtomicPtr field" - ); - layout - }; - - /// Creates an empty block. - fn new() -> Box { - // SAFETY: layout is not zero-sized - let ptr = unsafe { alloc_zeroed(Self::LAYOUT) }; - // Handle allocation failure - if ptr.is_null() { - handle_alloc_error(Self::LAYOUT) - } - // SAFETY: This is safe because: - // [1] `Block::next` (AtomicPtr) may be safely zero initialized. - // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. - // [3] `Slot::msg` (UnsafeCell) may be safely zero initialized because it - // holds a MaybeUninit. - // [4] `Slot::state` (AtomicUsize) may be safely zero initialized. - // TODO: unsafe { Box::new_zeroed().assume_init() } - unsafe { Box::from_raw(ptr.cast()) } - } - - /// Waits until the next pointer is set. - fn wait_next(&self) -> *mut Self { - let backoff = Backoff::new(); - loop { - let next = self.next.load(Ordering::Acquire); - if !next.is_null() { - return next; - } - backoff.snooze(); - } - } - - /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. - unsafe fn destroy(this: *mut Self, start: usize) { - // It is not necessary to set the `DESTROY` bit in the last slot because that slot has - // begun destruction of the block. - for i in start..BLOCK_CAP - 1 { - let slot = unsafe { (*this).slots.get_unchecked(i) }; - - // Mark the `DESTROY` bit if a thread is still using the slot. - if slot.state.load(Ordering::Acquire) & READ == 0 - && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 - { - // If a thread is still using the slot, it will continue destruction of the block. - return; - } - } - - // No thread is using the block, now it is safe to destroy it. 
- drop(unsafe { Box::from_raw(this) }); - } -} - -/// A position in a channel. -#[derive(Debug)] -struct Position { - /// The index in the channel. - index: AtomicUsize, - - /// The block in the linked list. - block: AtomicPtr>, -} - -/// The token type for the list flavor. -#[derive(Debug)] -pub(crate) struct ListToken { - /// The block of slots. - block: *const u8, - - /// The offset into the block. - offset: usize, -} - -impl Default for ListToken { - #[inline] - fn default() -> Self { - Self { - block: ptr::null(), - offset: 0, - } - } -} - -/// Unbounded channel implemented as a linked list. -/// -/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are -/// represented as numbers of type `usize` and wrap on overflow. -/// -/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and -/// improve cache efficiency. -pub(crate) struct Channel { - /// The head of the channel. - head: CachePadded>, - - /// The tail of the channel. - tail: CachePadded>, - - /// Receivers waiting while the channel is empty and not disconnected. - receivers: SyncWaker, - - /// Indicates that dropping a `Channel` may drop messages of type `T`. - _marker: PhantomData, -} - -impl Channel { - /// Creates a new unbounded channel. - pub(crate) fn new() -> Self { - Self { - head: CachePadded::new(Position { - block: AtomicPtr::new(ptr::null_mut()), - index: AtomicUsize::new(0), - }), - tail: CachePadded::new(Position { - block: AtomicPtr::new(ptr::null_mut()), - index: AtomicUsize::new(0), - }), - receivers: SyncWaker::new(), - _marker: PhantomData, - } - } - - /// Returns a receiver handle to the channel. - pub(crate) fn receiver(&self) -> Receiver<'_, T> { - Receiver(self) - } - - /// Returns a sender handle to the channel. - pub(crate) fn sender(&self) -> Sender<'_, T> { - Sender(self) - } - - /// Attempts to reserve a slot for sending a message. 
- fn start_send(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut tail = self.tail.index.load(Ordering::Acquire); - let mut block = self.tail.block.load(Ordering::Acquire); - let mut next_block = None; - - loop { - // Check if the channel is disconnected. - if tail & MARK_BIT != 0 { - token.list.block = ptr::null(); - return true; - } - - // Calculate the offset of the index into the block. - let offset = (tail >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - tail = self.tail.index.load(Ordering::Acquire); - block = self.tail.block.load(Ordering::Acquire); - continue; - } - - // If we're going to have to install the next block, allocate it in advance in order to - // make the wait for other threads as short as possible. - if offset + 1 == BLOCK_CAP && next_block.is_none() { - next_block = Some(Block::::new()); - } - - // If this is the first message to be sent into the channel, we need to allocate the - // first block and install it. - if block.is_null() { - let new = Box::into_raw(Block::::new()); - - if self - .tail - .block - .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - self.head.block.store(new, Ordering::Release); - block = new; - } else { - next_block = unsafe { Some(Box::from_raw(new)) }; - tail = self.tail.index.load(Ordering::Acquire); - block = self.tail.block.load(Ordering::Acquire); - continue; - } - } - - let new_tail = tail + (1 << SHIFT); - - // Try advancing the tail forward. - match self.tail.index.compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Acquire, - ) { - Ok(_) => unsafe { - // If we've reached the end of the block, install the next one. 
- if offset + 1 == BLOCK_CAP { - let next_block = Box::into_raw(next_block.unwrap()); - self.tail.block.store(next_block, Ordering::Release); - self.tail.index.fetch_add(1 << SHIFT, Ordering::Release); - (*block).next.store(next_block, Ordering::Release); - } - - token.list.block = block as *const u8; - token.list.offset = offset; - return true; - }, - Err(t) => { - tail = t; - block = self.tail.block.load(Ordering::Acquire); - backoff.spin(); - } - } - } - } - - /// Writes a message into the channel. - pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { - // If there is no slot, the channel is disconnected. - if token.list.block.is_null() { - return Err(msg); - } - - // Write the message into the slot. - let block = token.list.block.cast::>(); - let offset = token.list.offset; - let slot = unsafe { (*block).slots.get_unchecked(offset) }; - unsafe { slot.msg.get().write(MaybeUninit::new(msg)) } - slot.state.fetch_or(WRITE, Ordering::Release); - - // Wake a sleeping receiver. - self.receivers.notify(); - Ok(()) - } - - /// Attempts to reserve a slot for receiving a message. - fn start_recv(&self, token: &mut Token) -> bool { - let backoff = Backoff::new(); - let mut head = self.head.index.load(Ordering::Acquire); - let mut block = self.head.block.load(Ordering::Acquire); - - loop { - // Calculate the offset of the index into the block. - let offset = (head >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - continue; - } - - let mut new_head = head + (1 << SHIFT); - - if new_head & MARK_BIT == 0 { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::Relaxed); - - // If the tail equals the head, that means the channel is empty. - if head >> SHIFT == tail >> SHIFT { - // If the channel is disconnected... 
- if tail & MARK_BIT != 0 { - // ...then receive an error. - token.list.block = ptr::null(); - return true; - } else { - // Otherwise, the receive operation is not ready. - return false; - } - } - - // If head and tail are not in the same block, set `MARK_BIT` in head. - if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { - new_head |= MARK_BIT; - } - } - - // The block can be null here only if the first message is being sent into the channel. - // In that case, just wait until it gets initialized. - if block.is_null() { - backoff.snooze(); - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - continue; - } - - // Try moving the head index forward. - match self.head.index.compare_exchange_weak( - head, - new_head, - Ordering::SeqCst, - Ordering::Acquire, - ) { - Ok(_) => unsafe { - // If we've reached the end of the block, move to the next one. - if offset + 1 == BLOCK_CAP { - let next = (*block).wait_next(); - let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT); - if !(*next).next.load(Ordering::Relaxed).is_null() { - next_index |= MARK_BIT; - } - - self.head.block.store(next, Ordering::Release); - self.head.index.store(next_index, Ordering::Release); - } - - token.list.block = block as *const u8; - token.list.offset = offset; - return true; - }, - Err(h) => { - head = h; - block = self.head.block.load(Ordering::Acquire); - backoff.spin(); - } - } - } - } - - /// Reads a message from the channel. - pub(crate) unsafe fn read(&self, token: &mut Token) -> Result { - if token.list.block.is_null() { - // The channel is disconnected. - return Err(()); - } - - // Read the message. 
- let block = token.list.block as *mut Block; - let offset = token.list.offset; - let slot = unsafe { (*block).slots.get_unchecked(offset) }; - slot.wait_write(); - let msg = unsafe { slot.msg.get().read().assume_init() }; - - // Destroy the block if we've reached the end, or if another thread wanted to destroy but - // couldn't because we were busy reading from the slot. - unsafe { - if offset + 1 == BLOCK_CAP { - Block::destroy(block, 0); - } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { - Block::destroy(block, offset + 1); - } - } - - Ok(msg) - } - - /// Attempts to send a message into the channel. - pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError> { - self.send(msg, None).map_err(|err| match err { - SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg), - SendTimeoutError::Timeout(_) => unreachable!(), - }) - } - - /// Sends a message into the channel. - pub(crate) fn send( - &self, - msg: T, - _deadline: Option, - ) -> Result<(), SendTimeoutError> { - let token = &mut Token::default(); - assert!(self.start_send(token)); - unsafe { - self.write(token, msg) - .map_err(SendTimeoutError::Disconnected) - } - } - - /// Attempts to receive a message without blocking. - pub(crate) fn try_recv(&self) -> Result { - let token = &mut Token::default(); - - if self.start_recv(token) { - unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) } - } else { - Err(TryRecvError::Empty) - } - } - - /// Receives a message from the channel. - pub(crate) fn recv(&self, deadline: Option) -> Result { - let token = &mut Token::default(); - loop { - // Try receiving a message several times. 
- let backoff = Backoff::new(); - loop { - if self.start_recv(token) { - unsafe { - return self.read(token).map_err(|_| RecvTimeoutError::Disconnected); - } - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - if let Some(d) = deadline { - if Instant::now() >= d { - return Err(RecvTimeoutError::Timeout); - } - } - - // Prepare for blocking until a sender wakes us up. - Context::with(|cx| { - let oper = Operation::hook(token); - self.receivers.register(oper, cx); - - // Has the channel become ready just now? - if !self.is_empty() || self.is_disconnected() { - let _ = cx.try_select(Selected::Aborted); - } - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted | Selected::Disconnected => { - self.receivers.unregister(oper).unwrap(); - // If the channel was disconnected, we still have to check for remaining - // messages. - } - Selected::Operation(_) => {} - } - }); - } - } - - /// Returns the current number of messages inside the channel. - pub(crate) fn len(&self) -> usize { - loop { - // Load the tail index, then load the head index. - let mut tail = self.tail.index.load(Ordering::SeqCst); - let mut head = self.head.index.load(Ordering::SeqCst); - - // If the tail index didn't change, we've got consistent indices to work with. - if self.tail.index.load(Ordering::SeqCst) == tail { - // Erase the lower bits. - tail &= !((1 << SHIFT) - 1); - head &= !((1 << SHIFT) - 1); - - // Fix up indices if they fall onto block ends. - if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { - tail = tail.wrapping_add(1 << SHIFT); - } - if (head >> SHIFT) & (LAP - 1) == LAP - 1 { - head = head.wrapping_add(1 << SHIFT); - } - - // Rotate indices so that head falls into the first block. - let lap = (head >> SHIFT) / LAP; - tail = tail.wrapping_sub((lap * LAP) << SHIFT); - head = head.wrapping_sub((lap * LAP) << SHIFT); - - // Remove the lower bits. 
- tail >>= SHIFT; - head >>= SHIFT; - - // Return the difference minus the number of blocks between tail and head. - return tail - head - tail / LAP; - } - } - } - - /// Returns the capacity of the channel. - pub(crate) fn capacity(&self) -> Option { - None - } - - /// Disconnects senders and wakes up all blocked receivers. - /// - /// Returns `true` if this call disconnected the channel. - pub(crate) fn disconnect_senders(&self) -> bool { - let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst); - - if tail & MARK_BIT == 0 { - self.receivers.disconnect(); - true - } else { - false - } - } - - /// Disconnects receivers. - /// - /// Returns `true` if this call disconnected the channel. - pub(crate) fn disconnect_receivers(&self) -> bool { - let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst); - - if tail & MARK_BIT == 0 { - // If receivers are dropped first, discard all messages to free - // memory eagerly. - self.discard_all_messages(); - true - } else { - false - } - } - - /// Discards all messages. - /// - /// This method should only be called when all receivers are dropped. - fn discard_all_messages(&self) { - let backoff = Backoff::new(); - let mut tail = self.tail.index.load(Ordering::Acquire); - loop { - let offset = (tail >> SHIFT) % LAP; - if offset != BLOCK_CAP { - break; - } - - // New updates to tail will be rejected by MARK_BIT and aborted unless it's - // at boundary. We need to wait for the updates take affect otherwise there - // can be memory leaks. - backoff.snooze(); - tail = self.tail.index.load(Ordering::Acquire); - } - - let mut head = self.head.index.load(Ordering::Acquire); - // The channel may be uninitialized, so we have to swap to avoid overwriting any sender's attempts - // to initialize the first block before noticing that the receivers disconnected. 
Late allocations - // will be deallocated by the sender in Drop - let mut block = self.head.block.swap(ptr::null_mut(), Ordering::AcqRel); - - // If we're going to be dropping messages we need to synchronize with initialization - if head >> SHIFT != tail >> SHIFT { - // The block can be null here only if a sender is in the process of initializing the - // channel while another sender managed to send a message by inserting it into the - // semi-initialized channel and advanced the tail. - // In that case, just wait until it gets initialized. - while block.is_null() { - backoff.snooze(); - block = self.head.block.load(Ordering::Acquire); - } - } - - unsafe { - // Drop all messages between head and tail and deallocate the heap-allocated blocks. - while head >> SHIFT != tail >> SHIFT { - let offset = (head >> SHIFT) % LAP; - - if offset < BLOCK_CAP { - // Drop the message in the slot. - let slot = (*block).slots.get_unchecked(offset); - slot.wait_write(); - (*slot.msg.get()).assume_init_drop(); - } else { - (*block).wait_next(); - // Deallocate the block and move to the next one. - let next = (*block).next.load(Ordering::Acquire); - drop(Box::from_raw(block)); - block = next; - } - - head = head.wrapping_add(1 << SHIFT); - } - - // Deallocate the last remaining block. - if !block.is_null() { - drop(Box::from_raw(block)); - } - } - head &= !MARK_BIT; - self.head.index.store(head, Ordering::Release); - } - - /// Returns `true` if the channel is disconnected. - pub(crate) fn is_disconnected(&self) -> bool { - self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0 - } - - /// Returns `true` if the channel is empty. - pub(crate) fn is_empty(&self) -> bool { - let head = self.head.index.load(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::SeqCst); - head >> SHIFT == tail >> SHIFT - } - - /// Returns `true` if the channel is full. 
- pub(crate) fn is_full(&self) -> bool { - false - } -} - -impl Drop for Channel { - fn drop(&mut self) { - let mut head = *self.head.index.get_mut(); - let mut tail = *self.tail.index.get_mut(); - let mut block = *self.head.block.get_mut(); - - // Erase the lower bits. - head &= !((1 << SHIFT) - 1); - tail &= !((1 << SHIFT) - 1); - - unsafe { - // Drop all messages between head and tail and deallocate the heap-allocated blocks. - while head != tail { - let offset = (head >> SHIFT) % LAP; - - if offset < BLOCK_CAP { - // Drop the message in the slot. - let slot = (*block).slots.get_unchecked(offset); - (*slot.msg.get()).assume_init_drop(); - } else { - // Deallocate the block and move to the next one. - let next = *(*block).next.get_mut(); - drop(Box::from_raw(block)); - block = next; - } - - head = head.wrapping_add(1 << SHIFT); - } - - // Deallocate the last remaining block. - if !block.is_null() { - drop(Box::from_raw(block)); - } - } - } -} - -/// Receiver handle to a channel. -pub(crate) struct Receiver<'a, T>(&'a Channel); - -/// Sender handle to a channel. 
-pub(crate) struct Sender<'a, T>(&'a Channel); - -impl SelectHandle for Receiver<'_, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_recv(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.register(oper, cx); - self.is_ready() - } - - fn unregister(&self, oper: Operation) { - self.0.receivers.unregister(oper); - } - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - !self.0.is_empty() || self.0.is_disconnected() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - self.0.receivers.watch(oper, cx); - self.is_ready() - } - - fn unwatch(&self, oper: Operation) { - self.0.receivers.unwatch(oper); - } -} - -impl SelectHandle for Sender<'_, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_send(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - fn unregister(&self, _oper: Operation) {} - - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - fn is_ready(&self) -> bool { - true - } - - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - fn unwatch(&self, _oper: Operation) {} -} diff --git a/crossbeam-channel/src/flavors/mod.rs b/crossbeam-channel/src/flavors/mod.rs deleted file mode 100644 index 0314bf420..000000000 --- a/crossbeam-channel/src/flavors/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Channel flavors. -//! -//! There are six flavors: -//! -//! 1. `at` - Channel that delivers a message after a certain amount of time. -//! 2. `array` - Bounded channel based on a preallocated array. -//! 3. `list` - Unbounded channel implemented as a linked list. -//! 4. `never` - Channel that never delivers messages. -//! 5. `tick` - Channel that delivers messages periodically. -//! 6. 
`zero` - Zero-capacity channel. - -pub(crate) mod array; -pub(crate) mod at; -pub(crate) mod list; -pub(crate) mod never; -pub(crate) mod tick; -pub(crate) mod zero; diff --git a/crossbeam-channel/src/flavors/never.rs b/crossbeam-channel/src/flavors/never.rs deleted file mode 100644 index 7a9f830ac..000000000 --- a/crossbeam-channel/src/flavors/never.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! Channel that never delivers messages. -//! -//! Messages cannot be sent into this kind of channel. - -use std::marker::PhantomData; -use std::time::Instant; - -use crate::context::Context; -use crate::err::{RecvTimeoutError, TryRecvError}; -use crate::select::{Operation, SelectHandle, Token}; -use crate::utils; - -/// This flavor doesn't need a token. -pub(crate) type NeverToken = (); - -/// Channel that never delivers messages. -pub(crate) struct Channel { - _marker: PhantomData, -} - -impl Channel { - /// Creates a channel that never delivers messages. - #[inline] - pub(crate) fn new() -> Self { - Self { - _marker: PhantomData, - } - } - - /// Attempts to receive a message without blocking. - #[inline] - pub(crate) fn try_recv(&self) -> Result { - Err(TryRecvError::Empty) - } - - /// Receives a message from the channel. - #[inline] - pub(crate) fn recv(&self, deadline: Option) -> Result { - utils::sleep_until(deadline); - Err(RecvTimeoutError::Timeout) - } - - /// Reads a message from the channel. - #[inline] - pub(crate) unsafe fn read(&self, _token: &mut Token) -> Result { - Err(()) - } - - /// Returns `true` if the channel is empty. - #[inline] - pub(crate) fn is_empty(&self) -> bool { - true - } - - /// Returns `true` if the channel is full. - #[inline] - pub(crate) fn is_full(&self) -> bool { - true - } - - /// Returns the number of messages in the channel. - #[inline] - pub(crate) fn len(&self) -> usize { - 0 - } - - /// Returns the capacity of the channel. 
- #[inline] - pub(crate) fn capacity(&self) -> Option { - Some(0) - } -} - -impl SelectHandle for Channel { - #[inline] - fn try_select(&self, _token: &mut Token) -> bool { - false - } - - #[inline] - fn deadline(&self) -> Option { - None - } - - #[inline] - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unregister(&self, _oper: Operation) {} - - #[inline] - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - #[inline] - fn is_ready(&self) -> bool { - false - } - - #[inline] - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unwatch(&self, _oper: Operation) {} -} diff --git a/crossbeam-channel/src/flavors/tick.rs b/crossbeam-channel/src/flavors/tick.rs deleted file mode 100644 index a5b67ed9e..000000000 --- a/crossbeam-channel/src/flavors/tick.rs +++ /dev/null @@ -1,163 +0,0 @@ -//! Channel that delivers messages periodically. -//! -//! Messages cannot be sent into this kind of channel; they are materialized on demand. - -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_utils::atomic::AtomicCell; - -use crate::context::Context; -use crate::err::{RecvTimeoutError, TryRecvError}; -use crate::select::{Operation, SelectHandle, Token}; - -/// Result of a receive operation. -pub(crate) type TickToken = Option; - -/// Channel that delivers messages periodically. -pub(crate) struct Channel { - /// The instant at which the next message will be delivered. - delivery_time: AtomicCell, - - /// The time interval in which messages get delivered. - duration: Duration, -} - -impl Channel { - /// Creates a channel that delivers messages periodically. - #[inline] - pub(crate) fn new(delivery_time: Instant, dur: Duration) -> Self { - Self { - delivery_time: AtomicCell::new(delivery_time), - duration: dur, - } - } - - /// Attempts to receive a message without blocking. 
- #[inline] - pub(crate) fn try_recv(&self) -> Result { - loop { - let now = Instant::now(); - let delivery_time = self.delivery_time.load(); - - if now < delivery_time { - return Err(TryRecvError::Empty); - } - - if self - .delivery_time - .compare_exchange(delivery_time, now + self.duration) - .is_ok() - { - return Ok(delivery_time); - } - } - } - - /// Receives a message from the channel. - #[inline] - pub(crate) fn recv(&self, deadline: Option) -> Result { - loop { - let delivery_time = self.delivery_time.load(); - let now = Instant::now(); - - if let Some(d) = deadline { - if d < delivery_time { - if now < d { - thread::sleep(d - now); - } - return Err(RecvTimeoutError::Timeout); - } - } - - if self - .delivery_time - .compare_exchange(delivery_time, delivery_time.max(now) + self.duration) - .is_ok() - { - if now < delivery_time { - thread::sleep(delivery_time - now); - } - return Ok(delivery_time); - } - } - } - - /// Reads a message from the channel. - #[inline] - pub(crate) unsafe fn read(&self, token: &mut Token) -> Result { - token.tick.ok_or(()) - } - - /// Returns `true` if the channel is empty. - #[inline] - pub(crate) fn is_empty(&self) -> bool { - Instant::now() < self.delivery_time.load() - } - - /// Returns `true` if the channel is full. - #[inline] - pub(crate) fn is_full(&self) -> bool { - !self.is_empty() - } - - /// Returns the number of messages in the channel. - #[inline] - pub(crate) fn len(&self) -> usize { - usize::from(!self.is_empty()) - } - - /// Returns the capacity of the channel. 
- #[inline] - pub(crate) fn capacity(&self) -> Option { - Some(1) - } -} - -impl SelectHandle for Channel { - #[inline] - fn try_select(&self, token: &mut Token) -> bool { - match self.try_recv() { - Ok(msg) => { - token.tick = Some(msg); - true - } - Err(TryRecvError::Disconnected) => { - token.tick = None; - true - } - Err(TryRecvError::Empty) => false, - } - } - - #[inline] - fn deadline(&self) -> Option { - Some(self.delivery_time.load()) - } - - #[inline] - fn register(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unregister(&self, _oper: Operation) {} - - #[inline] - fn accept(&self, token: &mut Token, _cx: &Context) -> bool { - self.try_select(token) - } - - #[inline] - fn is_ready(&self) -> bool { - !self.is_empty() - } - - #[inline] - fn watch(&self, _oper: Operation, _cx: &Context) -> bool { - self.is_ready() - } - - #[inline] - fn unwatch(&self, _oper: Operation) {} -} diff --git a/crossbeam-channel/src/flavors/zero.rs b/crossbeam-channel/src/flavors/zero.rs deleted file mode 100644 index 08d226f87..000000000 --- a/crossbeam-channel/src/flavors/zero.rs +++ /dev/null @@ -1,496 +0,0 @@ -//! Zero-capacity channel. -//! -//! This kind of channel is also known as *rendezvous* channel. - -use std::boxed::Box; -use std::cell::UnsafeCell; -use std::marker::PhantomData; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::time::Instant; -use std::{fmt, ptr}; - -use crossbeam_utils::Backoff; - -use crate::context::Context; -use crate::err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError}; -use crate::select::{Operation, SelectHandle, Selected, Token}; -use crate::waker::Waker; - -/// A pointer to a packet. 
-pub(crate) struct ZeroToken(*mut ()); - -impl Default for ZeroToken { - fn default() -> Self { - Self(ptr::null_mut()) - } -} - -impl fmt::Debug for ZeroToken { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&(self.0 as usize), f) - } -} - -/// A slot for passing one message from a sender to a receiver. -struct Packet { - /// Equals `true` if the packet is allocated on the stack. - on_stack: bool, - - /// Equals `true` once the packet is ready for reading or writing. - ready: AtomicBool, - - /// The message. - msg: UnsafeCell>, -} - -impl Packet { - /// Creates an empty packet on the stack. - fn empty_on_stack() -> Self { - Self { - on_stack: true, - ready: AtomicBool::new(false), - msg: UnsafeCell::new(None), - } - } - - /// Creates an empty packet on the heap. - fn empty_on_heap() -> Box { - Box::new(Self { - on_stack: false, - ready: AtomicBool::new(false), - msg: UnsafeCell::new(None), - }) - } - - /// Creates a packet on the stack, containing a message. - fn message_on_stack(msg: T) -> Self { - Self { - on_stack: true, - ready: AtomicBool::new(false), - msg: UnsafeCell::new(Some(msg)), - } - } - - /// Waits until the packet becomes ready for reading or writing. - fn wait_ready(&self) { - let backoff = Backoff::new(); - while !self.ready.load(Ordering::Acquire) { - backoff.snooze(); - } - } -} - -/// Inner representation of a zero-capacity channel. -struct Inner { - /// Senders waiting to pair up with a receive operation. - senders: Waker, - - /// Receivers waiting to pair up with a send operation. - receivers: Waker, - - /// Equals `true` when the channel is disconnected. - is_disconnected: bool, -} - -/// Zero-capacity channel. -pub(crate) struct Channel { - /// Inner representation of the channel. - inner: Mutex, - - /// Indicates that dropping a `Channel` may drop values of type `T`. - _marker: PhantomData, -} - -impl Channel { - /// Constructs a new zero-capacity channel. 
- pub(crate) fn new() -> Self { - Self { - inner: Mutex::new(Inner { - senders: Waker::new(), - receivers: Waker::new(), - is_disconnected: false, - }), - _marker: PhantomData, - } - } - - /// Returns a receiver handle to the channel. - pub(crate) fn receiver(&self) -> Receiver<'_, T> { - Receiver(self) - } - - /// Returns a sender handle to the channel. - pub(crate) fn sender(&self) -> Sender<'_, T> { - Sender(self) - } - - /// Attempts to reserve a slot for sending a message. - fn start_send(&self, token: &mut Token) -> bool { - let mut inner = self.inner.lock().unwrap(); - - // If there's a waiting receiver, pair up with it. - if let Some(operation) = inner.receivers.try_select() { - token.zero.0 = operation.packet; - true - } else if inner.is_disconnected { - token.zero.0 = ptr::null_mut(); - true - } else { - false - } - } - - /// Writes a message into the packet. - pub(crate) unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> { - // If there is no packet, the channel is disconnected. - if token.zero.0.is_null() { - return Err(msg); - } - - let packet = unsafe { &*(token.zero.0 as *const Packet) }; - unsafe { packet.msg.get().write(Some(msg)) } - packet.ready.store(true, Ordering::Release); - Ok(()) - } - - /// Attempts to pair up with a sender. - fn start_recv(&self, token: &mut Token) -> bool { - let mut inner = self.inner.lock().unwrap(); - - // If there's a waiting sender, pair up with it. - if let Some(operation) = inner.senders.try_select() { - token.zero.0 = operation.packet; - true - } else if inner.is_disconnected { - token.zero.0 = ptr::null_mut(); - true - } else { - false - } - } - - /// Reads a message from the packet. - pub(crate) unsafe fn read(&self, token: &mut Token) -> Result { - // If there is no packet, the channel is disconnected. 
- if token.zero.0.is_null() { - return Err(()); - } - - let packet = unsafe { &*(token.zero.0 as *const Packet) }; - - if packet.on_stack { - // The message has been in the packet from the beginning, so there is no need to wait - // for it. However, after reading the message, we need to set `ready` to `true` in - // order to signal that the packet can be destroyed. - let msg = unsafe { packet.msg.get().replace(None).unwrap() }; - packet.ready.store(true, Ordering::Release); - Ok(msg) - } else { - // Wait until the message becomes available, then read it and destroy the - // heap-allocated packet. - packet.wait_ready(); - let msg = unsafe { packet.msg.get().replace(None).unwrap() }; - drop(unsafe { Box::from_raw(token.zero.0.cast::>()) }); - Ok(msg) - } - } - - /// Attempts to send a message into the channel. - pub(crate) fn try_send(&self, msg: T) -> Result<(), TrySendError> { - let token = &mut Token::default(); - let mut inner = self.inner.lock().unwrap(); - - // If there's a waiting receiver, pair up with it. - if let Some(operation) = inner.receivers.try_select() { - token.zero.0 = operation.packet; - drop(inner); - unsafe { - self.write(token, msg).ok().unwrap(); - } - Ok(()) - } else if inner.is_disconnected { - Err(TrySendError::Disconnected(msg)) - } else { - Err(TrySendError::Full(msg)) - } - } - - /// Sends a message into the channel. - pub(crate) fn send( - &self, - msg: T, - deadline: Option, - ) -> Result<(), SendTimeoutError> { - let token = &mut Token::default(); - let mut inner = self.inner.lock().unwrap(); - - // If there's a waiting receiver, pair up with it. - if let Some(operation) = inner.receivers.try_select() { - token.zero.0 = operation.packet; - drop(inner); - unsafe { - self.write(token, msg).ok().unwrap(); - } - return Ok(()); - } - - if inner.is_disconnected { - return Err(SendTimeoutError::Disconnected(msg)); - } - - Context::with(|cx| { - // Prepare for blocking until a receiver wakes us up. 
- let oper = Operation::hook(token); - let mut packet = Packet::::message_on_stack(msg); - inner - .senders - .register_with_packet(oper, &mut packet as *mut Packet as *mut (), cx); - inner.receivers.notify(); - drop(inner); - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted => { - self.inner.lock().unwrap().senders.unregister(oper).unwrap(); - let msg = unsafe { packet.msg.get().replace(None).unwrap() }; - Err(SendTimeoutError::Timeout(msg)) - } - Selected::Disconnected => { - self.inner.lock().unwrap().senders.unregister(oper).unwrap(); - let msg = unsafe { packet.msg.get().replace(None).unwrap() }; - Err(SendTimeoutError::Disconnected(msg)) - } - Selected::Operation(_) => { - // Wait until the message is read, then drop the packet. - packet.wait_ready(); - Ok(()) - } - } - }) - } - - /// Attempts to receive a message without blocking. - pub(crate) fn try_recv(&self) -> Result { - let token = &mut Token::default(); - let mut inner = self.inner.lock().unwrap(); - - // If there's a waiting sender, pair up with it. - if let Some(operation) = inner.senders.try_select() { - token.zero.0 = operation.packet; - drop(inner); - unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) } - } else if inner.is_disconnected { - Err(TryRecvError::Disconnected) - } else { - Err(TryRecvError::Empty) - } - } - - /// Receives a message from the channel. - pub(crate) fn recv(&self, deadline: Option) -> Result { - let token = &mut Token::default(); - let mut inner = self.inner.lock().unwrap(); - - // If there's a waiting sender, pair up with it. 
- if let Some(operation) = inner.senders.try_select() { - token.zero.0 = operation.packet; - drop(inner); - unsafe { - return self.read(token).map_err(|_| RecvTimeoutError::Disconnected); - } - } - - if inner.is_disconnected { - return Err(RecvTimeoutError::Disconnected); - } - - Context::with(|cx| { - // Prepare for blocking until a sender wakes us up. - let oper = Operation::hook(token); - let mut packet = Packet::::empty_on_stack(); - inner.receivers.register_with_packet( - oper, - &mut packet as *mut Packet as *mut (), - cx, - ); - inner.senders.notify(); - drop(inner); - - // Block the current thread. - let sel = cx.wait_until(deadline); - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted => { - self.inner - .lock() - .unwrap() - .receivers - .unregister(oper) - .unwrap(); - Err(RecvTimeoutError::Timeout) - } - Selected::Disconnected => { - self.inner - .lock() - .unwrap() - .receivers - .unregister(oper) - .unwrap(); - Err(RecvTimeoutError::Disconnected) - } - Selected::Operation(_) => { - // Wait until the message is provided, then read it. - packet.wait_ready(); - unsafe { Ok(packet.msg.get().replace(None).unwrap()) } - } - } - }) - } - - /// Disconnects the channel and wakes up all blocked senders and receivers. - /// - /// Returns `true` if this call disconnected the channel. - pub(crate) fn disconnect(&self) -> bool { - let mut inner = self.inner.lock().unwrap(); - - if !inner.is_disconnected { - inner.is_disconnected = true; - inner.senders.disconnect(); - inner.receivers.disconnect(); - true - } else { - false - } - } - - /// Returns the current number of messages inside the channel. - pub(crate) fn len(&self) -> usize { - 0 - } - - /// Returns the capacity of the channel. - pub(crate) fn capacity(&self) -> Option { - Some(0) - } - - /// Returns `true` if the channel is empty. - pub(crate) fn is_empty(&self) -> bool { - true - } - - /// Returns `true` if the channel is full. 
- pub(crate) fn is_full(&self) -> bool { - true - } -} - -/// Receiver handle to a channel. -pub(crate) struct Receiver<'a, T>(&'a Channel); - -/// Sender handle to a channel. -pub(crate) struct Sender<'a, T>(&'a Channel); - -impl SelectHandle for Receiver<'_, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_recv(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - let packet = Box::into_raw(Packet::::empty_on_heap()); - - let mut inner = self.0.inner.lock().unwrap(); - inner - .receivers - .register_with_packet(oper, packet.cast::<()>(), cx); - inner.senders.notify(); - inner.senders.can_select() || inner.is_disconnected - } - - fn unregister(&self, oper: Operation) { - if let Some(operation) = self.0.inner.lock().unwrap().receivers.unregister(oper) { - unsafe { - drop(Box::from_raw(operation.packet.cast::>())); - } - } - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - token.zero.0 = cx.wait_packet(); - true - } - - fn is_ready(&self) -> bool { - let inner = self.0.inner.lock().unwrap(); - inner.senders.can_select() || inner.is_disconnected - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - let mut inner = self.0.inner.lock().unwrap(); - inner.receivers.watch(oper, cx); - inner.senders.can_select() || inner.is_disconnected - } - - fn unwatch(&self, oper: Operation) { - let mut inner = self.0.inner.lock().unwrap(); - inner.receivers.unwatch(oper); - } -} - -impl SelectHandle for Sender<'_, T> { - fn try_select(&self, token: &mut Token) -> bool { - self.0.start_send(token) - } - - fn deadline(&self) -> Option { - None - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - let packet = Box::into_raw(Packet::::empty_on_heap()); - - let mut inner = self.0.inner.lock().unwrap(); - inner - .senders - .register_with_packet(oper, packet.cast::<()>(), cx); - inner.receivers.notify(); - inner.receivers.can_select() || 
inner.is_disconnected - } - - fn unregister(&self, oper: Operation) { - if let Some(operation) = self.0.inner.lock().unwrap().senders.unregister(oper) { - unsafe { - drop(Box::from_raw(operation.packet.cast::>())); - } - } - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - token.zero.0 = cx.wait_packet(); - true - } - - fn is_ready(&self) -> bool { - let inner = self.0.inner.lock().unwrap(); - inner.receivers.can_select() || inner.is_disconnected - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - let mut inner = self.0.inner.lock().unwrap(); - inner.senders.watch(oper, cx); - inner.receivers.can_select() || inner.is_disconnected - } - - fn unwatch(&self, oper: Operation) { - let mut inner = self.0.inner.lock().unwrap(); - inner.senders.unwatch(oper); - } -} diff --git a/crossbeam-channel/src/lib.rs b/crossbeam-channel/src/lib.rs deleted file mode 100644 index 35876c160..000000000 --- a/crossbeam-channel/src/lib.rs +++ /dev/null @@ -1,377 +0,0 @@ -//! Multi-producer multi-consumer channels for message passing. -//! -//! This crate is an alternative to [`std::sync::mpsc`] with more features and better performance. -//! -//! # Hello, world! -//! -//! ``` -//! use crossbeam_channel::unbounded; -//! -//! // Create a channel of unbounded capacity. -//! let (s, r) = unbounded(); -//! -//! // Send a message into the channel. -//! s.send("Hello, world!").unwrap(); -//! -//! // Receive the message from the channel. -//! assert_eq!(r.recv(), Ok("Hello, world!")); -//! ``` -//! -//! # Channel types -//! -//! Channels can be created using two functions: -//! -//! * [`bounded`] creates a channel of bounded capacity, i.e. there is a limit to how many messages -//! it can hold at a time. -//! -//! * [`unbounded`] creates a channel of unbounded capacity, i.e. it can hold any number of -//! messages at a time. -//! -//! Both functions return a [`Sender`] and a [`Receiver`], which represent the two opposite sides -//! of a channel. -//! -//! 
Creating a bounded channel: -//! -//! ``` -//! use crossbeam_channel::bounded; -//! -//! // Create a channel that can hold at most 5 messages at a time. -//! let (s, r) = bounded(5); -//! -//! // Can send only 5 messages without blocking. -//! for i in 0..5 { -//! s.send(i).unwrap(); -//! } -//! -//! // Another call to `send` would block because the channel is full. -//! // s.send(5).unwrap(); -//! ``` -//! -//! Creating an unbounded channel: -//! -//! ``` -//! use crossbeam_channel::unbounded; -//! -//! // Create an unbounded channel. -//! let (s, r) = unbounded(); -//! -//! // Can send any number of messages into the channel without blocking. -//! for i in 0..1000 { -//! s.send(i).unwrap(); -//! } -//! ``` -//! -//! A special case is zero-capacity channel, which cannot hold any messages. Instead, send and -//! receive operations must appear at the same time in order to pair up and pass the message over: -//! -//! ``` -//! use std::thread; -//! use crossbeam_channel::bounded; -//! -//! // Create a zero-capacity channel. -//! let (s, r) = bounded(0); -//! -//! // Sending blocks until a receive operation appears on the other side. -//! thread::spawn(move || s.send("Hi!").unwrap()); -//! -//! // Receiving blocks until a send operation appears on the other side. -//! assert_eq!(r.recv(), Ok("Hi!")); -//! ``` -//! -//! # Sharing channels -//! -//! Senders and receivers can be cloned and sent to other threads: -//! -//! ``` -//! use std::thread; -//! use crossbeam_channel::bounded; -//! -//! let (s1, r1) = bounded(0); -//! let (s2, r2) = (s1.clone(), r1.clone()); -//! -//! // Spawn a thread that receives a message and then sends one. -//! thread::spawn(move || { -//! r2.recv().unwrap(); -//! s2.send(2).unwrap(); -//! }); -//! -//! // Send a message and then receive one. -//! s1.send(1).unwrap(); -//! r1.recv().unwrap(); -//! ``` -//! -//! Note that cloning only creates a new handle to the same sending or receiving side. It does not -//! 
create a separate stream of messages in any way: -//! -//! ``` -//! use crossbeam_channel::unbounded; -//! -//! let (s1, r1) = unbounded(); -//! let (s2, r2) = (s1.clone(), r1.clone()); -//! let (s3, r3) = (s2.clone(), r2.clone()); -//! -//! s1.send(10).unwrap(); -//! s2.send(20).unwrap(); -//! s3.send(30).unwrap(); -//! -//! assert_eq!(r3.recv(), Ok(10)); -//! assert_eq!(r1.recv(), Ok(20)); -//! assert_eq!(r2.recv(), Ok(30)); -//! ``` -//! -//! It's also possible to share senders and receivers by reference: -//! -//! ``` -//! use crossbeam_channel::bounded; -//! use crossbeam_utils::thread::scope; -//! -//! let (s, r) = bounded(0); -//! -//! scope(|scope| { -//! // Spawn a thread that receives a message and then sends one. -//! scope.spawn(|_| { -//! r.recv().unwrap(); -//! s.send(2).unwrap(); -//! }); -//! -//! // Send a message and then receive one. -//! s.send(1).unwrap(); -//! r.recv().unwrap(); -//! }).unwrap(); -//! ``` -//! -//! # Disconnection -//! -//! When all senders or all receivers associated with a channel get dropped, the channel becomes -//! disconnected. No more messages can be sent, but any remaining messages can still be received. -//! Send and receive operations on a disconnected channel never block. -//! -//! ``` -//! use crossbeam_channel::{unbounded, RecvError}; -//! -//! let (s, r) = unbounded(); -//! s.send(1).unwrap(); -//! s.send(2).unwrap(); -//! s.send(3).unwrap(); -//! -//! // The only sender is dropped, disconnecting the channel. -//! drop(s); -//! -//! // The remaining messages can be received. -//! assert_eq!(r.recv(), Ok(1)); -//! assert_eq!(r.recv(), Ok(2)); -//! assert_eq!(r.recv(), Ok(3)); -//! -//! // There are no more messages in the channel. -//! assert!(r.is_empty()); -//! -//! // Note that calling `r.recv()` does not block. -//! // Instead, `Err(RecvError)` is returned immediately. -//! assert_eq!(r.recv(), Err(RecvError)); -//! ``` -//! -//! # Blocking operations -//! -//! 
Send and receive operations come in three flavors: -//! -//! * Non-blocking (returns immediately with success or failure). -//! * Blocking (waits until the operation succeeds or the channel becomes disconnected). -//! * Blocking with a timeout (blocks only for a certain duration of time). -//! -//! A simple example showing the difference between non-blocking and blocking operations: -//! -//! ``` -//! use crossbeam_channel::{bounded, RecvError, TryRecvError}; -//! -//! let (s, r) = bounded(1); -//! -//! // Send a message into the channel. -//! s.send("foo").unwrap(); -//! -//! // This call would block because the channel is full. -//! // s.send("bar").unwrap(); -//! -//! // Receive the message. -//! assert_eq!(r.recv(), Ok("foo")); -//! -//! // This call would block because the channel is empty. -//! // r.recv(); -//! -//! // Try receiving a message without blocking. -//! assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -//! -//! // Disconnect the channel. -//! drop(s); -//! -//! // This call doesn't block because the channel is now disconnected. -//! assert_eq!(r.recv(), Err(RecvError)); -//! ``` -//! -//! # Iteration -//! -//! Receivers can be used as iterators. For example, method [`iter`] creates an iterator that -//! receives messages until the channel becomes empty and disconnected. Note that iteration may -//! block waiting for next message to arrive. -//! -//! ``` -//! use std::thread; -//! use crossbeam_channel::unbounded; -//! -//! let (s, r) = unbounded(); -//! -//! thread::spawn(move || { -//! s.send(1).unwrap(); -//! s.send(2).unwrap(); -//! s.send(3).unwrap(); -//! drop(s); // Disconnect the channel. -//! }); -//! -//! // Collect all messages from the channel. -//! // Note that the call to `collect` blocks until the sender is dropped. -//! let v: Vec<_> = r.iter().collect(); -//! -//! assert_eq!(v, [1, 2, 3]); -//! ``` -//! -//! A non-blocking iterator can be created using [`try_iter`], which receives all available -//! 
messages without blocking: -//! -//! ``` -//! use crossbeam_channel::unbounded; -//! -//! let (s, r) = unbounded(); -//! s.send(1).unwrap(); -//! s.send(2).unwrap(); -//! s.send(3).unwrap(); -//! // No need to drop the sender. -//! -//! // Receive all messages currently in the channel. -//! let v: Vec<_> = r.try_iter().collect(); -//! -//! assert_eq!(v, [1, 2, 3]); -//! ``` -//! -//! # Selection -//! -//! The [`select!`] macro allows you to define a set of channel operations, wait until any one of -//! them becomes ready, and finally execute it. If multiple operations are ready at the same time, -//! a random one among them is selected. -//! -//! It is also possible to define a `default` case that gets executed if none of the operations are -//! ready, either right away or for a certain duration of time. -//! -//! An operation is considered to be ready if it doesn't have to block. Note that it is ready even -//! when it will simply return an error because the channel is disconnected. -//! -//! An example of receiving a message from two channels: -//! -//! ``` -//! use std::thread; -//! use std::time::Duration; -//! use crossbeam_channel::{select, unbounded}; -//! -//! let (s1, r1) = unbounded(); -//! let (s2, r2) = unbounded(); -//! -//! # let t1 = -//! thread::spawn(move || s1.send(10).unwrap()); -//! # let t2 = -//! thread::spawn(move || s2.send(20).unwrap()); -//! -//! // At most one of these two receive operations will be executed. -//! select! { -//! recv(r1) -> msg => assert_eq!(msg, Ok(10)), -//! recv(r2) -> msg => assert_eq!(msg, Ok(20)), -//! default(Duration::from_secs(1)) => println!("timed out"), -//! } -//! # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -//! # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -//! ``` -//! -//! If you need to select over a dynamically created list of channel operations, use [`Select`] -//! instead. 
The [`select!`] macro is just a convenience wrapper around [`Select`]. -//! -//! # Extra channels -//! -//! Three functions can create special kinds of channels, all of which return just a [`Receiver`] -//! handle: -//! -//! * [`after`] creates a channel that delivers a single message after a certain duration of time. -//! * [`tick`] creates a channel that delivers messages periodically. -//! * [`never`](never()) creates a channel that never delivers messages. -//! -//! These channels are very efficient because messages get lazily generated on receive operations. -//! -//! An example that prints elapsed time every 50 milliseconds for the duration of 1 second: -//! -//! ``` -//! use std::time::{Duration, Instant}; -//! use crossbeam_channel::{after, select, tick}; -//! -//! let start = Instant::now(); -//! let ticker = tick(Duration::from_millis(50)); -//! let timeout = after(Duration::from_secs(1)); -//! -//! loop { -//! select! { -//! recv(ticker) -> _ => println!("elapsed: {:?}", start.elapsed()), -//! recv(timeout) -> _ => break, -//! } -//! } -//! ``` -//! -//! [`send`]: Sender::send -//! [`recv`]: Receiver::recv -//! [`iter`]: Receiver::iter -//! [`try_iter`]: Receiver::try_iter - -#![no_std] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![warn(missing_docs, unsafe_op_in_unsafe_fn)] - -#[cfg(feature = "std")] -extern crate std; - -#[cfg(feature = "std")] -mod channel; -#[cfg(feature = "std")] -mod context; -#[cfg(feature = "std")] -mod counter; -#[cfg(feature = "std")] -mod err; -#[cfg(feature = "std")] -mod flavors; -#[cfg(feature = "std")] -mod select; -#[cfg(feature = "std")] -mod select_macro; -#[cfg(feature = "std")] -mod utils; -#[cfg(feature = "std")] -mod waker; - -/// Crate internals used by the `select!` macro. 
-#[doc(hidden)] -#[cfg(feature = "std")] -pub mod internal { - pub use crate::select::{select, select_timeout, try_select, SelectHandle}; -} - -#[cfg(feature = "std")] -pub use crate::{ - channel::{ - after, at, bounded, never, tick, unbounded, IntoIter, Iter, Receiver, Sender, TryIter, - }, - err::{ - ReadyTimeoutError, RecvError, RecvTimeoutError, SelectTimeoutError, SendError, - SendTimeoutError, TryReadyError, TryRecvError, TrySelectError, TrySendError, - }, - select::{Select, SelectedOperation}, -}; diff --git a/crossbeam-channel/src/select.rs b/crossbeam-channel/src/select.rs deleted file mode 100644 index bf484d786..000000000 --- a/crossbeam-channel/src/select.rs +++ /dev/null @@ -1,1323 +0,0 @@ -//! Interface to the select mechanism. - -use std::fmt; -use std::marker::PhantomData; -use std::mem; -use std::time::{Duration, Instant}; -use std::vec::Vec; - -use crossbeam_utils::Backoff; - -use crate::channel::{self, Receiver, Sender}; -use crate::context::Context; -use crate::err::{ReadyTimeoutError, TryReadyError}; -use crate::err::{RecvError, SendError}; -use crate::err::{SelectTimeoutError, TrySelectError}; -use crate::flavors; -use crate::utils; - -/// Temporary data that gets initialized during select or a blocking operation, and is consumed by -/// `read` or `write`. -/// -/// Each field contains data associated with a specific channel flavor. -// This is a private API that is used by the select macro. -#[derive(Debug, Default)] -pub struct Token { - pub(crate) at: flavors::at::AtToken, - pub(crate) array: flavors::array::ArrayToken, - pub(crate) list: flavors::list::ListToken, - #[allow(dead_code)] - pub(crate) never: flavors::never::NeverToken, - pub(crate) tick: flavors::tick::TickToken, - pub(crate) zero: flavors::zero::ZeroToken, -} - -/// Identifier associated with an operation by a specific thread on a specific channel. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Operation(usize); - -impl Operation { - /// Creates an operation identifier from a mutable reference. - /// - /// This function essentially just turns the address of the reference into a number. The - /// reference should point to a variable that is specific to the thread and the operation, - /// and is alive for the entire duration of select or blocking operation. - #[inline] - pub fn hook(r: &mut T) -> Self { - let val = r as *mut T as usize; - // Make sure that the pointer address doesn't equal the numerical representation of - // `Selected::{Waiting, Aborted, Disconnected}`. - assert!(val > 2); - Self(val) - } -} - -/// Current state of a select or a blocking operation. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum Selected { - /// Still waiting for an operation. - Waiting, - - /// The attempt to block the current thread has been aborted. - Aborted, - - /// An operation became ready because a channel is disconnected. - Disconnected, - - /// An operation became ready because a message can be sent or received. - Operation(Operation), -} - -impl From for Selected { - #[inline] - fn from(val: usize) -> Self { - match val { - 0 => Self::Waiting, - 1 => Self::Aborted, - 2 => Self::Disconnected, - oper => Self::Operation(Operation(oper)), - } - } -} - -impl From for usize { - #[inline] - fn from(val: Selected) -> Self { - match val { - Selected::Waiting => 0, - Selected::Aborted => 1, - Selected::Disconnected => 2, - Selected::Operation(Operation(val)) => val, - } - } -} - -/// A receiver or a sender that can participate in select. -/// -/// This is a handle that assists select in executing an operation, registration, deciding on the -/// appropriate deadline for blocking, etc. -// This is a private API (exposed inside crossbeam_channel::internal module) that is used by the select macro. -pub trait SelectHandle { - /// Attempts to select an operation and returns `true` on success. 
- fn try_select(&self, token: &mut Token) -> bool; - - /// Returns a deadline for an operation, if there is one. - fn deadline(&self) -> Option; - - /// Registers an operation for execution and returns `true` if it is now ready. - fn register(&self, oper: Operation, cx: &Context) -> bool; - - /// Unregisters an operation for execution. - fn unregister(&self, oper: Operation); - - /// Attempts to select an operation the thread got woken up for and returns `true` on success. - fn accept(&self, token: &mut Token, cx: &Context) -> bool; - - /// Returns `true` if an operation can be executed without blocking. - fn is_ready(&self) -> bool; - - /// Registers an operation for readiness notification and returns `true` if it is now ready. - fn watch(&self, oper: Operation, cx: &Context) -> bool; - - /// Unregisters an operation for readiness notification. - fn unwatch(&self, oper: Operation); -} - -impl SelectHandle for &T { - fn try_select(&self, token: &mut Token) -> bool { - (**self).try_select(token) - } - - fn deadline(&self) -> Option { - (**self).deadline() - } - - fn register(&self, oper: Operation, cx: &Context) -> bool { - (**self).register(oper, cx) - } - - fn unregister(&self, oper: Operation) { - (**self).unregister(oper); - } - - fn accept(&self, token: &mut Token, cx: &Context) -> bool { - (**self).accept(token, cx) - } - - fn is_ready(&self) -> bool { - (**self).is_ready() - } - - fn watch(&self, oper: Operation, cx: &Context) -> bool { - (**self).watch(oper, cx) - } - - fn unwatch(&self, oper: Operation) { - (**self).unwatch(oper) - } -} - -/// Determines when a select operation should time out. -#[derive(Clone, Copy, Eq, PartialEq)] -enum Timeout { - /// No blocking. - Now, - - /// Block forever. - Never, - - /// Time out after the time instant. - At(Instant), -} - -/// Runs until one of the operations is selected, potentially blocking the current thread. 
-/// -/// Successful receive operations will have to be followed up by `channel::read()` and successful -/// send operations by `channel::write()`. -fn run_select( - handles: &mut [(&dyn SelectHandle, usize, *const u8)], - timeout: Timeout, - is_biased: bool, -) -> Option<(Token, usize, *const u8)> { - if handles.is_empty() { - // Wait until the timeout and return. - match timeout { - Timeout::Now => return None, - Timeout::Never => { - utils::sleep_until(None); - unreachable!(); - } - Timeout::At(when) => { - utils::sleep_until(Some(when)); - return None; - } - } - } - - if !is_biased { - // Shuffle the operations for fairness. - utils::shuffle(handles); - } - - // Create a token, which serves as a temporary variable that gets initialized in this function - // and is later used by a call to `channel::read()` or `channel::write()` that completes the - // selected operation. - let mut token = Token::default(); - - // Try selecting one of the operations without blocking. - for &(handle, i, ptr) in handles.iter() { - if handle.try_select(&mut token) { - return Some((token, i, ptr)); - } - } - - loop { - // Prepare for blocking. - let res = Context::with(|cx| { - let mut sel = Selected::Waiting; - let mut registered_count = 0; - let mut index_ready = None; - - if let Timeout::Now = timeout { - cx.try_select(Selected::Aborted).unwrap(); - } - - // Register all operations. - for (handle, i, _) in handles.iter_mut() { - registered_count += 1; - - // If registration returns `false`, that means the operation has just become ready. - if handle.register(Operation::hook::<&dyn SelectHandle>(handle), cx) { - // Try aborting select. - sel = match cx.try_select(Selected::Aborted) { - Ok(()) => { - index_ready = Some(*i); - Selected::Aborted - } - Err(s) => s, - }; - break; - } - - // If another thread has already selected one of the operations, stop registration. 
- sel = cx.selected(); - if sel != Selected::Waiting { - break; - } - } - - if sel == Selected::Waiting { - // Check with each operation for how long we're allowed to block, and compute the - // earliest deadline. - let mut deadline: Option = match timeout { - Timeout::Now => return None, - Timeout::Never => None, - Timeout::At(when) => Some(when), - }; - for &(handle, _, _) in handles.iter() { - if let Some(x) = handle.deadline() { - deadline = deadline.map(|y| x.min(y)).or(Some(x)); - } - } - - // Block the current thread. - sel = cx.wait_until(deadline); - } - - // Unregister all registered operations. - for (handle, _, _) in handles.iter_mut().take(registered_count) { - handle.unregister(Operation::hook::<&dyn SelectHandle>(handle)); - } - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted => { - // If an operation became ready during registration, try selecting it. - if let Some(index_ready) = index_ready { - for &(handle, i, ptr) in handles.iter() { - if i == index_ready && handle.try_select(&mut token) { - return Some((i, ptr)); - } - } - } - } - Selected::Disconnected => {} - Selected::Operation(_) => { - // Find the selected operation. - for (handle, i, ptr) in handles.iter_mut() { - // Is this the selected operation? - if sel == Selected::Operation(Operation::hook::<&dyn SelectHandle>(handle)) - { - // Try selecting this operation. - if handle.accept(&mut token, cx) { - return Some((*i, *ptr)); - } - } - } - } - } - - None - }); - - // Return if an operation was selected. - if let Some((i, ptr)) = res { - return Some((token, i, ptr)); - } - - // Try selecting one of the operations without blocking. 
- for &(handle, i, ptr) in handles.iter() { - if handle.try_select(&mut token) { - return Some((token, i, ptr)); - } - } - - match timeout { - Timeout::Now => return None, - Timeout::Never => {} - Timeout::At(when) => { - if Instant::now() >= when { - return None; - } - } - } - } -} - -/// Runs until one of the operations becomes ready, potentially blocking the current thread. -fn run_ready( - handles: &mut [(&dyn SelectHandle, usize, *const u8)], - timeout: Timeout, - is_biased: bool, -) -> Option { - if handles.is_empty() { - // Wait until the timeout and return. - match timeout { - Timeout::Now => return None, - Timeout::Never => { - utils::sleep_until(None); - unreachable!(); - } - Timeout::At(when) => { - utils::sleep_until(Some(when)); - return None; - } - } - } - - if !is_biased { - // Shuffle the operations for fairness. - utils::shuffle(handles); - } - - loop { - let backoff = Backoff::new(); - loop { - // Check operations for readiness. - for &(handle, i, _) in handles.iter() { - if handle.is_ready() { - return Some(i); - } - } - - if backoff.is_completed() { - break; - } else { - backoff.snooze(); - } - } - - // Check for timeout. - match timeout { - Timeout::Now => return None, - Timeout::Never => {} - Timeout::At(when) => { - if Instant::now() >= when { - return None; - } - } - } - - // Prepare for blocking. - let res = Context::with(|cx| { - let mut sel = Selected::Waiting; - let mut registered_count = 0; - - // Begin watching all operations. - for (handle, _, _) in handles.iter_mut() { - registered_count += 1; - let oper = Operation::hook::<&dyn SelectHandle>(handle); - - // If registration returns `false`, that means the operation has just become ready. - if handle.watch(oper, cx) { - sel = match cx.try_select(Selected::Operation(oper)) { - Ok(()) => Selected::Operation(oper), - Err(s) => s, - }; - break; - } - - // If another thread has already chosen one of the operations, stop registration. 
- sel = cx.selected(); - if sel != Selected::Waiting { - break; - } - } - - if sel == Selected::Waiting { - // Check with each operation for how long we're allowed to block, and compute the - // earliest deadline. - let mut deadline: Option = match timeout { - Timeout::Now => unreachable!(), - Timeout::Never => None, - Timeout::At(when) => Some(when), - }; - for &(handle, _, _) in handles.iter() { - if let Some(x) = handle.deadline() { - deadline = deadline.map(|y| x.min(y)).or(Some(x)); - } - } - - // Block the current thread. - sel = cx.wait_until(deadline); - } - - // Unwatch all operations. - for (handle, _, _) in handles.iter_mut().take(registered_count) { - handle.unwatch(Operation::hook::<&dyn SelectHandle>(handle)); - } - - match sel { - Selected::Waiting => unreachable!(), - Selected::Aborted => {} - Selected::Disconnected => {} - Selected::Operation(_) => { - for (handle, i, _) in handles.iter_mut() { - let oper = Operation::hook::<&dyn SelectHandle>(handle); - if sel == Selected::Operation(oper) { - return Some(*i); - } - } - } - } - - None - }); - - // Return if an operation became ready. - if res.is_some() { - return res; - } - } -} - -/// Attempts to select one of the operations without blocking. -// This is a private API (exposed inside crossbeam_channel::internal module) that is used by the select macro. -#[inline] -pub fn try_select<'a>( - handles: &mut [(&'a dyn SelectHandle, usize, *const u8)], - is_biased: bool, -) -> Result, TrySelectError> { - match run_select(handles, Timeout::Now, is_biased) { - None => Err(TrySelectError), - Some((token, index, ptr)) => Ok(SelectedOperation { - token, - index, - ptr, - _marker: PhantomData, - }), - } -} - -/// Blocks until one of the operations becomes ready and selects it. -// This is a private API (exposed inside crossbeam_channel::internal module) that is used by the select macro. 
-#[inline] -pub fn select<'a>( - handles: &mut [(&'a dyn SelectHandle, usize, *const u8)], - is_biased: bool, -) -> SelectedOperation<'a> { - if handles.is_empty() { - panic!("no operations have been added to `Select`"); - } - - let (token, index, ptr) = run_select(handles, Timeout::Never, is_biased).unwrap(); - SelectedOperation { - token, - index, - ptr, - _marker: PhantomData, - } -} - -/// Blocks for a limited time until one of the operations becomes ready and selects it. -// This is a private API (exposed inside crossbeam_channel::internal module) that is used by the select macro. -#[inline] -pub fn select_timeout<'a>( - handles: &mut [(&'a dyn SelectHandle, usize, *const u8)], - timeout: Duration, - is_biased: bool, -) -> Result, SelectTimeoutError> { - match Instant::now().checked_add(timeout) { - Some(deadline) => select_deadline(handles, deadline, is_biased), - None => Ok(select(handles, is_biased)), - } -} - -/// Blocks until a given deadline, or until one of the operations becomes ready and selects it. -#[inline] -pub(crate) fn select_deadline<'a>( - handles: &mut [(&'a dyn SelectHandle, usize, *const u8)], - deadline: Instant, - is_biased: bool, -) -> Result, SelectTimeoutError> { - match run_select(handles, Timeout::At(deadline), is_biased) { - None => Err(SelectTimeoutError), - Some((token, index, ptr)) => Ok(SelectedOperation { - token, - index, - ptr, - _marker: PhantomData, - }), - } -} - -/// Selects from a set of channel operations. -/// -/// `Select` allows you to define a set of channel operations, wait until any one of them becomes -/// ready, and finally execute it. If multiple operations are ready at the same time, a random one -/// among them is selected. -/// -/// An operation is considered to be ready if it doesn't have to block. Note that it is ready even -/// when it will simply return an error because the channel is disconnected. -/// -/// The [`select!`] macro is a convenience wrapper around `Select`. 
However, it cannot select over a -/// dynamically created list of channel operations. -/// -/// [`select!`]: crate::select! -/// -/// Once a list of operations has been built with `Select`, there are two different ways of -/// proceeding: -/// -/// * Select an operation with [`try_select`], [`select`], or [`select_timeout`]. If successful, -/// the returned selected operation has already begun and **must** be completed. If we don't -/// complete it, a panic will occur. -/// -/// * Wait for an operation to become ready with [`try_ready`], [`ready`], or [`ready_timeout`]. If -/// successful, we may attempt to execute the operation, but are not obliged to. In fact, it's -/// possible for another thread to make the operation not ready just before we try executing it, -/// so it's wise to use a retry loop. However, note that these methods might return with success -/// spuriously, so it's a good idea to always double check if the operation is really ready. -/// -/// # Examples -/// -/// Use [`select`] to receive a message from a list of receivers: -/// -/// ``` -/// use crossbeam_channel::{Receiver, RecvError, Select}; -/// -/// fn recv_multiple(rs: &[Receiver]) -> Result { -/// // Build a list of operations. -/// let mut sel = Select::new(); -/// for r in rs { -/// sel.recv(r); -/// } -/// -/// // Complete the selected operation. -/// let oper = sel.select(); -/// let index = oper.index(); -/// oper.recv(&rs[index]) -/// } -/// ``` -/// -/// Use [`ready`] to receive a message from a list of receivers: -/// -/// ``` -/// use crossbeam_channel::{Receiver, RecvError, Select}; -/// -/// fn recv_multiple(rs: &[Receiver]) -> Result { -/// // Build a list of operations. -/// let mut sel = Select::new(); -/// for r in rs { -/// sel.recv(r); -/// } -/// -/// loop { -/// // Wait until a receive operation becomes ready and try executing it. -/// let index = sel.ready(); -/// let res = rs[index].try_recv(); -/// -/// // If the operation turns out not to be ready, retry. 
-/// if let Err(e) = res { -/// if e.is_empty() { -/// continue; -/// } -/// } -/// -/// // Success! -/// return res.map_err(|_| RecvError); -/// } -/// } -/// ``` -/// -/// [`try_select`]: Select::try_select -/// [`select`]: Select::select -/// [`select_timeout`]: Select::select_timeout -/// [`try_ready`]: Select::try_ready -/// [`ready`]: Select::ready -/// [`ready_timeout`]: Select::ready_timeout -pub struct Select<'a> { - /// A list of senders and receivers participating in selection. - handles: Vec<(&'a dyn SelectHandle, usize, *const u8)>, - - /// The next index to assign to an operation. - next_index: usize, - - /// Whether to use the index of handles as bias for selecting ready operations. - biased: bool, -} - -unsafe impl Send for Select<'_> {} -unsafe impl Sync for Select<'_> {} - -impl<'a> Select<'a> { - /// Creates an empty list of channel operations for selection. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::Select; - /// - /// let mut sel = Select::new(); - /// - /// // The list of operations is empty, which means no operation can be selected. - /// assert!(sel.try_select().is_err()); - /// ``` - pub fn new() -> Self { - Self { - handles: Vec::with_capacity(4), - next_index: 0, - biased: false, - } - } - - /// Creates an empty list of channel operations with biased selection. - /// - /// When multiple handles are ready, this will select the operation with the lowest index. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::Select; - /// - /// let mut sel = Select::new_biased(); - /// - /// // The list of operations is empty, which means no operation can be selected. - /// assert!(sel.try_select().is_err()); - /// ``` - pub fn new_biased() -> Self { - Self { - biased: true, - ..Default::default() - } - } - - /// Adds a send operation. - /// - /// Returns the index of the added operation. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s, r) = unbounded::(); - /// - /// let mut sel = Select::new(); - /// let index = sel.send(&s); - /// ``` - pub fn send(&mut self, s: &'a Sender) -> usize { - let i = self.next_index; - let ptr = s as *const Sender<_> as *const u8; - self.handles.push((s, i, ptr)); - self.next_index += 1; - i - } - - /// Adds a receive operation. - /// - /// Returns the index of the added operation. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s, r) = unbounded::(); - /// - /// let mut sel = Select::new(); - /// let index = sel.recv(&r); - /// ``` - pub fn recv(&mut self, r: &'a Receiver) -> usize { - let i = self.next_index; - let ptr = r as *const Receiver<_> as *const u8; - self.handles.push((r, i, ptr)); - self.next_index += 1; - i - } - - /// Removes a previously added operation. - /// - /// This is useful when an operation is selected because the channel got disconnected and we - /// want to try again to select a different operation instead. - /// - /// If new operations are added after removing some, the indices of removed operations will not - /// be reused. - /// - /// # Panics - /// - /// An attempt to remove a non-existing or already removed operation will panic. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded::(); - /// let (_, r2) = unbounded::(); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // Both operations are initially ready, so a random one will be executed. 
- /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper2); - /// assert!(oper.recv(&r2).is_err()); - /// sel.remove(oper2); - /// - /// s1.send(10).unwrap(); - /// - /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper1); - /// assert_eq!(oper.recv(&r1), Ok(10)); - /// ``` - pub fn remove(&mut self, index: usize) { - assert!( - index < self.next_index, - "index out of bounds; {} >= {}", - index, - self.next_index, - ); - - let i = self - .handles - .iter() - .enumerate() - .find(|(_, (_, i, _))| *i == index) - .expect("no operation with this index") - .0; - - self.handles.swap_remove(i); - } - - /// Attempts to select one of the operations without blocking. - /// - /// If an operation is ready, it is selected and returned. If multiple operations are ready at - /// the same time, a random one among them is selected. If none of the operations are ready, an - /// error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// The selected operation must be completed with [`SelectedOperation::send`] - /// or [`SelectedOperation::recv`]. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// s1.send(10).unwrap(); - /// s2.send(20).unwrap(); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // Both operations are initially ready, so a random one will be executed. 
- /// let oper = sel.try_select(); - /// match oper { - /// Err(_) => panic!("both operations should be ready"), - /// Ok(oper) => match oper.index() { - /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), - /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), - /// _ => unreachable!(), - /// } - /// } - /// ``` - pub fn try_select(&mut self) -> Result, TrySelectError> { - try_select(&mut self.handles, self.biased) - } - - /// Blocks until one of the operations becomes ready and selects it. - /// - /// Once an operation becomes ready, it is selected and returned. If multiple operations are - /// ready at the same time, a random one among them is selected. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// The selected operation must be completed with [`SelectedOperation::send`] - /// or [`SelectedOperation::recv`]. - /// - /// # Panics - /// - /// Panics if no operations have been added to `Select`. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// # let t1 = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// # let t2 = - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. 
- /// let oper = sel.select(); - /// match oper.index() { - /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), - /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), - /// _ => unreachable!(), - /// } - /// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn select(&mut self) -> SelectedOperation<'a> { - select(&mut self.handles, self.biased) - } - - /// Blocks for a limited time until one of the operations becomes ready and selects it. - /// - /// If an operation becomes ready, it is selected and returned. If multiple operations are - /// ready at the same time, a random one among them is selected. If none of the operations - /// become ready for the specified duration, an error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// The selected operation must be completed with [`SelectedOperation::send`] - /// or [`SelectedOperation::recv`]. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// # let t1 = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// # let t2 = - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. 
- /// let oper = sel.select_timeout(Duration::from_millis(500)); - /// match oper { - /// Err(_) => panic!("should not have timed out"), - /// Ok(oper) => match oper.index() { - /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), - /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), - /// _ => unreachable!(), - /// } - /// } - /// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn select_timeout( - &mut self, - timeout: Duration, - ) -> Result, SelectTimeoutError> { - select_timeout(&mut self.handles, timeout, self.biased) - } - - /// Blocks until a given deadline, or until one of the operations becomes ready and selects it. - /// - /// If an operation becomes ready, it is selected and returned. If multiple operations are - /// ready at the same time, a random one among them is selected. If none of the operations - /// become ready before the given deadline, an error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// The selected operation must be completed with [`SelectedOperation::send`] - /// or [`SelectedOperation::recv`]. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::{Instant, Duration}; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// # let t1 = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// # let t2 = - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// let deadline = Instant::now() + Duration::from_millis(500); - /// - /// // The second operation will be selected because it becomes ready first. - /// let oper = sel.select_deadline(deadline); - /// match oper { - /// Err(_) => panic!("should not have timed out"), - /// Ok(oper) => match oper.index() { - /// i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(10)), - /// i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(20)), - /// _ => unreachable!(), - /// } - /// } - /// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn select_deadline( - &mut self, - deadline: Instant, - ) -> Result, SelectTimeoutError> { - select_deadline(&mut self.handles, deadline, self.biased) - } - - /// Attempts to find a ready operation without blocking. - /// - /// If an operation is ready, its index is returned. If multiple operations are ready at the - /// same time, a random one among them is chosen. If none of the operations are ready, an error - /// is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. 
- /// - /// Note that this method might return with success spuriously, so it's a good idea to always - /// double check if the operation is really ready. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// s1.send(10).unwrap(); - /// s2.send(20).unwrap(); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // Both operations are initially ready, so a random one will be chosen. - /// match sel.try_ready() { - /// Err(_) => panic!("both operations should be ready"), - /// Ok(i) if i == oper1 => assert_eq!(r1.try_recv(), Ok(10)), - /// Ok(i) if i == oper2 => assert_eq!(r2.try_recv(), Ok(20)), - /// Ok(_) => unreachable!(), - /// } - /// ``` - pub fn try_ready(&mut self) -> Result { - match run_ready(&mut self.handles, Timeout::Now, self.biased) { - None => Err(TryReadyError), - Some(index) => Ok(index), - } - } - - /// Blocks until one of the operations becomes ready. - /// - /// Once an operation becomes ready, its index is returned. If multiple operations are ready at - /// the same time, a random one among them is chosen. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// Note that this method might return with success spuriously, so it's a good idea to always - /// double check if the operation is really ready. - /// - /// # Panics - /// - /// Panics if no operations have been added to `Select`. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// # let t1 = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// # let t2 = - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. - /// match sel.ready() { - /// i if i == oper1 => assert_eq!(r1.try_recv(), Ok(10)), - /// i if i == oper2 => assert_eq!(r2.try_recv(), Ok(20)), - /// _ => unreachable!(), - /// } - /// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn ready(&mut self) -> usize { - if self.handles.is_empty() { - panic!("no operations have been added to `Select`"); - } - - run_ready(&mut self.handles, Timeout::Never, self.biased).unwrap() - } - - /// Blocks for a limited time until one of the operations becomes ready. - /// - /// If an operation becomes ready, its index is returned. If multiple operations are ready at - /// the same time, a random one among them is chosen. If none of the operations become ready - /// for the specified duration, an error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// Note that this method might return with success spuriously, so it's a good idea to double - /// check if the operation is really ready. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// # let t1 = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// # let t2 = - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. - /// match sel.ready_timeout(Duration::from_millis(500)) { - /// Err(_) => panic!("should not have timed out"), - /// Ok(i) if i == oper1 => assert_eq!(r1.try_recv(), Ok(10)), - /// Ok(i) if i == oper2 => assert_eq!(r2.try_recv(), Ok(20)), - /// Ok(_) => unreachable!(), - /// } - /// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn ready_timeout(&mut self, timeout: Duration) -> Result { - match Instant::now().checked_add(timeout) { - Some(deadline) => self.ready_deadline(deadline), - None => Ok(self.ready()), - } - } - - /// Blocks until a given deadline, or until one of the operations becomes ready. - /// - /// If an operation becomes ready, its index is returned. If multiple operations are ready at - /// the same time, a random one among them is chosen. If none of the operations become ready - /// before the deadline, an error is returned. - /// - /// An operation is considered to be ready if it doesn't have to block. Note that it is ready - /// even when it will simply return an error because the channel is disconnected. - /// - /// Note that this method might return with success spuriously, so it's a good idea to double - /// check if the operation is really ready. 
- /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::{Duration, Instant}; - /// use crossbeam_channel::{unbounded, Select}; - /// - /// let deadline = Instant::now() + Duration::from_millis(500); - /// - /// let (s1, r1) = unbounded(); - /// let (s2, r2) = unbounded(); - /// - /// # let t1 = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_secs(1)); - /// s1.send(10).unwrap(); - /// }); - /// # let t2 = - /// thread::spawn(move || s2.send(20).unwrap()); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r1); - /// let oper2 = sel.recv(&r2); - /// - /// // The second operation will be selected because it becomes ready first. - /// match sel.ready_deadline(deadline) { - /// Err(_) => panic!("should not have timed out"), - /// Ok(i) if i == oper1 => assert_eq!(r1.try_recv(), Ok(10)), - /// Ok(i) if i == oper2 => assert_eq!(r2.try_recv(), Ok(20)), - /// Ok(_) => unreachable!(), - /// } - /// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn ready_deadline(&mut self, deadline: Instant) -> Result { - match run_ready(&mut self.handles, Timeout::At(deadline), self.biased) { - None => Err(ReadyTimeoutError), - Some(index) => Ok(index), - } - } -} - -impl Clone for Select<'_> { - fn clone(&self) -> Self { - Self { - handles: self.handles.clone(), - next_index: self.next_index, - biased: self.biased, - } - } -} - -impl Default for Select<'_> { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Debug for Select<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Select { .. }") - } -} - -/// A selected operation that needs to be completed. -/// -/// To complete the operation, call [`send`] or [`recv`]. -/// -/// # Panics -/// -/// Forgetting to complete the operation is an error and might lead to deadlocks. 
If a -/// `SelectedOperation` is dropped without completion, a panic occurs. -/// -/// [`send`]: SelectedOperation::send -/// [`recv`]: SelectedOperation::recv -#[must_use] -pub struct SelectedOperation<'a> { - /// Token needed to complete the operation. - token: Token, - - /// The index of the selected operation. - index: usize, - - /// The address of the selected `Sender` or `Receiver`. - ptr: *const u8, - - /// Indicates that `Sender`s and `Receiver`s are borrowed. - _marker: PhantomData<&'a ()>, -} - -impl SelectedOperation<'_> { - /// Returns the index of the selected operation. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, Select}; - /// - /// let (s1, r1) = bounded::<()>(0); - /// let (s2, r2) = bounded::<()>(0); - /// let (s3, r3) = bounded::<()>(1); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.send(&s1); - /// let oper2 = sel.recv(&r2); - /// let oper3 = sel.send(&s3); - /// - /// // Only the last operation is ready. - /// let oper = sel.select(); - /// assert_eq!(oper.index(), 2); - /// assert_eq!(oper.index(), oper3); - /// - /// // Complete the operation. - /// oper.send(&s3, ()).unwrap(); - /// ``` - pub fn index(&self) -> usize { - self.index - } - - /// Completes the send operation. - /// - /// The passed [`Sender`] reference must be the same one that was used in [`Select::send`] - /// when the operation was added. - /// - /// # Panics - /// - /// Panics if an incorrect [`Sender`] reference is passed. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, Select, SendError}; - /// - /// let (s, r) = bounded::(0); - /// drop(r); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.send(&s); - /// - /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper1); - /// assert_eq!(oper.send(&s, 10), Err(SendError(10))); - /// ``` - pub fn send(mut self, s: &Sender, msg: T) -> Result<(), SendError> { - assert!( - s as *const Sender as *const u8 == self.ptr, - "passed a sender that wasn't selected", - ); - let res = unsafe { channel::write(s, &mut self.token, msg) }; - mem::forget(self); - res.map_err(SendError) - } - - /// Completes the receive operation. - /// - /// The passed [`Receiver`] reference must be the same one that was used in [`Select::recv`] - /// when the operation was added. - /// - /// # Panics - /// - /// Panics if an incorrect [`Receiver`] reference is passed. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_channel::{bounded, Select, RecvError}; - /// - /// let (s, r) = bounded::(0); - /// drop(s); - /// - /// let mut sel = Select::new(); - /// let oper1 = sel.recv(&r); - /// - /// let oper = sel.select(); - /// assert_eq!(oper.index(), oper1); - /// assert_eq!(oper.recv(&r), Err(RecvError)); - /// ``` - pub fn recv(mut self, r: &Receiver) -> Result { - assert!( - r as *const Receiver as *const u8 == self.ptr, - "passed a receiver that wasn't selected", - ); - let res = unsafe { channel::read(r, &mut self.token) }; - mem::forget(self); - res.map_err(|_| RecvError) - } -} - -impl fmt::Debug for SelectedOperation<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("SelectedOperation { .. 
}") - } -} - -impl Drop for SelectedOperation<'_> { - fn drop(&mut self) { - panic!("dropped `SelectedOperation` without completing the operation"); - } -} diff --git a/crossbeam-channel/src/select_macro.rs b/crossbeam-channel/src/select_macro.rs deleted file mode 100644 index 7ab3f2325..000000000 --- a/crossbeam-channel/src/select_macro.rs +++ /dev/null @@ -1,1154 +0,0 @@ -//! The `select!` macro. - -/// A helper macro for `select!` to hide the long list of macro patterns from the documentation. -/// -/// The macro consists of two stages: -/// 1. Parsing -/// 2. Code generation -/// -/// The parsing stage consists of these subparts: -/// 1. `@list`: Turns a list of tokens into a list of cases. -/// 2. `@list_errorN`: Diagnoses the syntax error. -/// 3. `@case`: Parses a single case and verifies its argument list. -/// -/// The codegen stage consists of these subparts: -/// 1. `@init`: Attempts to optimize `select!` away and initializes the list of handles. -/// 1. `@count`: Counts the listed cases. -/// 3. `@add`: Adds send/receive operations to the list of handles and starts selection. -/// 4. `@complete`: Completes the selected send/receive operation. -/// -/// If the parsing stage encounters a syntax error or the codegen stage ends up with too many -/// cases to process, the macro fails with a compile-time error. -#[doc(hidden)] -#[macro_export] -macro_rules! crossbeam_channel_internal { - // The list is empty. Now check the arguments of each processed case. - (@list - () - ($($head:tt)*) - ) => { - $crate::crossbeam_channel_internal!( - @case - ($($head)*) - () - () - ) - }; - // If necessary, insert an empty argument list after `default`. - (@list - (default => $($tail:tt)*) - ($($head:tt)*) - ) => { - $crate::crossbeam_channel_internal!( - @list - (default() => $($tail)*) - ($($head)*) - ) - }; - // But print an error if `default` is followed by a `->`. 
- (@list - (default -> $($tail:tt)*) - ($($head:tt)*) - ) => { - compile_error!( - "expected `=>` after `default` case, found `->`" - ) - }; - // Print an error if there's an `->` after the argument list in the default case. - (@list - (default $args:tt -> $($tail:tt)*) - ($($head:tt)*) - ) => { - compile_error!( - "expected `=>` after `default` case, found `->`" - ) - }; - // Print an error if there is a missing result in a recv case. - (@list - (recv($($args:tt)*) => $($tail:tt)*) - ($($head:tt)*) - ) => { - compile_error!( - "expected `->` after `recv` case, found `=>`" - ) - }; - // Print an error if there is a missing result in a send case. - (@list - (send($($args:tt)*) => $($tail:tt)*) - ($($head:tt)*) - ) => { - compile_error!( - "expected `->` after `send` operation, found `=>`" - ) - }; - // Make sure the arrow and the result are not repeated. - (@list - ($case:ident $args:tt -> $res:tt -> $($tail:tt)*) - ($($head:tt)*) - ) => { - compile_error!("expected `=>`, found `->`") - }; - // Print an error if there is a semicolon after the block. - (@list - ($case:ident $args:tt $(-> $res:pat)* => $body:block; $($tail:tt)*) - ($($head:tt)*) - ) => { - compile_error!( - "did you mean to put a comma instead of the semicolon after `}`?" - ) - }; - // The first case is separated by a comma. - (@list - ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr, $($tail:tt)*) - ($($head:tt)*) - ) => { - $crate::crossbeam_channel_internal!( - @list - ($($tail)*) - ($($head)* $case ($($args)*) $(-> $res)* => { $body },) - ) - }; - // Don't require a comma after the case if it has a proper block. - (@list - ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:block $($tail:tt)*) - ($($head:tt)*) - ) => { - $crate::crossbeam_channel_internal!( - @list - ($($tail)*) - ($($head)* $case ($($args)*) $(-> $res)* => { $body },) - ) - }; - // Only one case remains. - (@list - ($case:ident ($($args:tt)*) $(-> $res:pat)* => $body:expr $(,)?) 
- ($($head:tt)*) - ) => { - $crate::crossbeam_channel_internal!( - @list - () - ($($head)* $case ($($args)*) $(-> $res)* => { $body },) - ) - }; - // Diagnose and print an error. - (@list - ($($tail:tt)*) - ($($head:tt)*) - ) => { - $crate::crossbeam_channel_internal!(@list_error1 $($tail)*) - }; - // Stage 1: check the case type. - (@list_error1 recv $($tail:tt)*) => { - $crate::crossbeam_channel_internal!(@list_error2 recv $($tail)*) - }; - (@list_error1 send $($tail:tt)*) => { - $crate::crossbeam_channel_internal!(@list_error2 send $($tail)*) - }; - (@list_error1 default $($tail:tt)*) => { - $crate::crossbeam_channel_internal!(@list_error2 default $($tail)*) - }; - (@list_error1 $t:tt $($tail:tt)*) => { - compile_error!( - concat!( - "expected one of `recv`, `send`, or `default`, found `", - stringify!($t), - "`", - ) - ) - }; - (@list_error1 $($tail:tt)*) => { - $crate::crossbeam_channel_internal!(@list_error2 $($tail)*); - }; - // Stage 2: check the argument list. - (@list_error2 $case:ident) => { - compile_error!( - concat!( - "missing argument list after `", - stringify!($case), - "`", - ) - ) - }; - (@list_error2 $case:ident => $($tail:tt)*) => { - compile_error!( - concat!( - "missing argument list after `", - stringify!($case), - "`", - ) - ) - }; - (@list_error2 $($tail:tt)*) => { - $crate::crossbeam_channel_internal!(@list_error3 $($tail)*) - }; - // Stage 3: check the `=>` and what comes after it. 
- (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)*) => { - compile_error!( - concat!( - "missing `=>` after `", - stringify!($case), - "` case", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* =>) => { - compile_error!( - "expected expression after `=>`" - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $body:expr; $($tail:tt)*) => { - compile_error!( - concat!( - "did you mean to put a comma instead of the semicolon after `", - stringify!($body), - "`?", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => recv($($a:tt)*) $($tail:tt)*) => { - compile_error!( - "expected an expression after `=>`" - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => send($($a:tt)*) $($tail:tt)*) => { - compile_error!( - "expected an expression after `=>`" - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => default($($a:tt)*) $($tail:tt)*) => { - compile_error!( - "expected an expression after `=>`" - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident($($a:tt)*) $($tail:tt)*) => { - compile_error!( - concat!( - "did you mean to put a comma after `", - stringify!($f), - "(", - stringify!($($a)*), - ")`?", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident!($($a:tt)*) $($tail:tt)*) => { - compile_error!( - concat!( - "did you mean to put a comma after `", - stringify!($f), - "!(", - stringify!($($a)*), - ")`?", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident![$($a:tt)*] $($tail:tt)*) => { - compile_error!( - concat!( - "did you mean to put a comma after `", - stringify!($f), - "![", - stringify!($($a)*), - "]`?", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> $r:pat)* => $f:ident!{$($a:tt)*} $($tail:tt)*) => { - compile_error!( - concat!( - "did you mean to put a comma after `", - stringify!($f), - "!{", - stringify!($($a)*), - "}`?", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) $(-> 
$r:pat)* => $body:tt $($tail:tt)*) => { - compile_error!( - concat!( - "did you mean to put a comma after `", - stringify!($body), - "`?", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) -> => $($tail:tt)*) => { - compile_error!("missing pattern after `->`") - }; - (@list_error3 $case:ident($($args:tt)*) $t:tt $(-> $r:pat)* => $($tail:tt)*) => { - compile_error!( - concat!( - "expected `->`, found `", - stringify!($t), - "`", - ) - ) - }; - (@list_error3 $case:ident($($args:tt)*) -> $t:tt $($tail:tt)*) => { - compile_error!( - concat!( - "expected a pattern, found `", - stringify!($t), - "`", - ) - ) - }; - (@list_error3 recv($($args:tt)*) $t:tt $($tail:tt)*) => { - compile_error!( - concat!( - "expected `->`, found `", - stringify!($t), - "`", - ) - ) - }; - (@list_error3 send($($args:tt)*) $t:tt $($tail:tt)*) => { - compile_error!( - concat!( - "expected `->`, found `", - stringify!($t), - "`", - ) - ) - }; - (@list_error3 recv $args:tt $($tail:tt)*) => { - compile_error!( - concat!( - "expected an argument list after `recv`, found `", - stringify!($args), - "`", - ) - ) - }; - (@list_error3 send $args:tt $($tail:tt)*) => { - compile_error!( - concat!( - "expected an argument list after `send`, found `", - stringify!($args), - "`", - ) - ) - }; - (@list_error3 default $args:tt $($tail:tt)*) => { - compile_error!( - concat!( - "expected an argument list or `=>` after `default`, found `", - stringify!($args), - "`", - ) - ) - }; - (@list_error3 $($tail:tt)*) => { - $crate::crossbeam_channel_internal!(@list_error4 $($tail)*) - }; - // Stage 4: fail with a generic error message. - (@list_error4 $($tail:tt)*) => { - compile_error!("invalid syntax") - }; - - // Success! All cases were parsed. - (@case - () - $cases:tt - $default:tt - ) => { - $crate::crossbeam_channel_internal!( - @init - $cases - $default - ) - }; - - // Check the format of a recv case. - (@case - (recv($r:expr $(,)?) 
-> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - $crate::crossbeam_channel_internal!( - @case - ($($tail)*) - ($($cases)* recv($r) -> $res => $body,) - $default - ) - }; - // Print an error if the argument list is invalid. - (@case - (recv($($args:tt)*) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - compile_error!( - concat!( - "invalid argument list in `recv(", - stringify!($($args)*), - ")`", - ) - ) - }; - // Print an error if there is no argument list. - (@case - (recv $t:tt $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - compile_error!( - concat!( - "expected an argument list after `recv`, found `", - stringify!($t), - "`", - ) - ) - }; - - // Check the format of a send case. - (@case - (send($s:expr, $m:expr $(,)?) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - $crate::crossbeam_channel_internal!( - @case - ($($tail)*) - ($($cases)* send($s, $m) -> $res => $body,) - $default - ) - }; - // Print an error if the argument list is invalid. - (@case - (send($($args:tt)*) -> $res:pat => $body:tt, $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - compile_error!( - concat!( - "invalid argument list in `send(", - stringify!($($args)*), - ")`", - ) - ) - }; - // Print an error if there is no argument list. - (@case - (send $t:tt $($tail:tt)*) - ($($cases:tt)*) - $default:tt - ) => { - compile_error!( - concat!( - "expected an argument list after `send`, found `", - stringify!($t), - "`", - ) - ) - }; - - // Check the format of a default case. - (@case - (default() => $body:tt, $($tail:tt)*) - $cases:tt - () - ) => { - $crate::crossbeam_channel_internal!( - @case - ($($tail)*) - $cases - (default() => $body,) - ) - }; - // Check the format of a default case with timeout. - (@case - (default($timeout:expr $(,)?) 
=> $body:tt, $($tail:tt)*) - $cases:tt - () - ) => { - $crate::crossbeam_channel_internal!( - @case - ($($tail)*) - $cases - (default($timeout) => $body,) - ) - }; - // Check for duplicate default cases... - (@case - (default $($tail:tt)*) - $cases:tt - ($($def:tt)+) - ) => { - compile_error!( - "there can be only one `default` case in a `select!` block" - ) - }; - // Print an error if the argument list is invalid. - (@case - (default($($args:tt)*) => $body:tt, $($tail:tt)*) - $cases:tt - $default:tt - ) => { - compile_error!( - concat!( - "invalid argument list in `default(", - stringify!($($args)*), - ")`", - ) - ) - }; - // Print an error if there is an unexpected token after `default`. - (@case - (default $t:tt $($tail:tt)*) - $cases:tt - $default:tt - ) => { - compile_error!( - concat!( - "expected an argument list or `=>` after `default`, found `", - stringify!($t), - "`", - ) - ) - }; - - // The case was not consumed, therefore it must be invalid. - (@case - ($case:ident $($tail:tt)*) - $cases:tt - $default:tt - ) => { - compile_error!( - concat!( - "expected one of `recv`, `send`, or `default`, found `", - stringify!($case), - "`", - ) - ) - }; - - // Optimize `select!` into `try_recv()`. - (@init - (recv($r:expr) -> $res:pat => $recv_body:tt,) - (default() => $default_body:tt,) - ) => {{ - match $r { - ref _r => { - let _r: &$crate::Receiver<_> = _r; - match _r.try_recv() { - ::std::result::Result::Err($crate::TryRecvError::Empty) => { - $default_body - } - _res => { - let _res = _res.map_err(|_| $crate::RecvError); - let $res = _res; - $recv_body - } - } - } - } - }}; - // Optimize `select!` into `recv()`. - (@init - (recv($r:expr) -> $res:pat => $body:tt,) - () - ) => {{ - match $r { - ref _r => { - let _r: &$crate::Receiver<_> = _r; - let _res = _r.recv(); - let $res = _res; - $body - } - } - }}; - // Optimize `select!` into `recv_timeout()`. 
- (@init - (recv($r:expr) -> $res:pat => $recv_body:tt,) - (default($timeout:expr) => $default_body:tt,) - ) => {{ - match $r { - ref _r => { - let _r: &$crate::Receiver<_> = _r; - match _r.recv_timeout($timeout) { - ::std::result::Result::Err($crate::RecvTimeoutError::Timeout) => { - $default_body - } - _res => { - let _res = _res.map_err(|_| $crate::RecvError); - let $res = _res; - $recv_body - } - } - } - } - }}; - - // // Optimize the non-blocking case with two receive operations. - // (@init - // (recv($r1:expr) -> $res1:pat => $recv_body1:tt,) - // (recv($r2:expr) -> $res2:pat => $recv_body2:tt,) - // (default() => $default_body:tt,) - // ) => {{ - // match $r1 { - // ref _r1 => { - // let _r1: &$crate::Receiver<_> = _r1; - // - // match $r2 { - // ref _r2 => { - // let _r2: &$crate::Receiver<_> = _r2; - // - // // TODO(stjepang): Implement this optimization. - // } - // } - // } - // } - // }}; - // // Optimize the blocking case with two receive operations. - // (@init - // (recv($r1:expr) -> $res1:pat => $body1:tt,) - // (recv($r2:expr) -> $res2:pat => $body2:tt,) - // () - // ) => {{ - // match $r1 { - // ref _r1 => { - // let _r1: &$crate::Receiver<_> = _r1; - // - // match $r2 { - // ref _r2 => { - // let _r2: &$crate::Receiver<_> = _r2; - // - // // TODO(stjepang): Implement this optimization. - // } - // } - // } - // } - // }}; - // // Optimize the case with two receive operations and a timeout. - // (@init - // (recv($r1:expr) -> $res1:pat => $recv_body1:tt,) - // (recv($r2:expr) -> $res2:pat => $recv_body2:tt,) - // (default($timeout:expr) => $default_body:tt,) - // ) => {{ - // match $r1 { - // ref _r1 => { - // let _r1: &$crate::Receiver<_> = _r1; - // - // match $r2 { - // ref _r2 => { - // let _r2: &$crate::Receiver<_> = _r2; - // - // // TODO(stjepang): Implement this optimization. - // } - // } - // } - // } - // }}; - - // // Optimize `select!` into `try_send()`. 
- // (@init - // (send($s:expr, $m:expr) -> $res:pat => $send_body:tt,) - // (default() => $default_body:tt,) - // ) => {{ - // match $s { - // ref _s => { - // let _s: &$crate::Sender<_> = _s; - // // TODO(stjepang): Implement this optimization. - // } - // } - // }}; - // // Optimize `select!` into `send()`. - // (@init - // (send($s:expr, $m:expr) -> $res:pat => $body:tt,) - // () - // ) => {{ - // match $s { - // ref _s => { - // let _s: &$crate::Sender<_> = _s; - // // TODO(stjepang): Implement this optimization. - // } - // } - // }}; - // // Optimize `select!` into `send_timeout()`. - // (@init - // (send($s:expr, $m:expr) -> $res:pat => $body:tt,) - // (default($timeout:expr) => $body:tt,) - // ) => {{ - // match $s { - // ref _s => { - // let _s: &$crate::Sender<_> = _s; - // // TODO(stjepang): Implement this optimization. - // } - // } - // }}; - - // Create the list of handles and add operations to it. - (@init - ($($cases:tt)*) - $default:tt - ) => {{ - const _LEN: usize = $crate::crossbeam_channel_internal!(@count ($($cases)*)); - let _handle: &dyn $crate::internal::SelectHandle = &$crate::never::<()>(); - - #[allow(unused_mut, clippy::zero_repeat_side_effects)] - let mut _sel = [(_handle, 0, ::std::ptr::null()); _LEN]; - - $crate::crossbeam_channel_internal!( - @add - _sel - ($($cases)*) - $default - ( - (0usize _oper0) - (1usize _oper1) - (2usize _oper2) - (3usize _oper3) - (4usize _oper4) - (5usize _oper5) - (6usize _oper6) - (7usize _oper7) - (8usize _oper8) - (9usize _oper9) - (10usize _oper10) - (11usize _oper11) - (12usize _oper12) - (13usize _oper13) - (14usize _oper14) - (15usize _oper15) - (16usize _oper16) - (17usize _oper17) - (18usize _oper18) - (19usize _oper19) - (20usize _oper20) - (21usize _oper21) - (22usize _oper22) - (23usize _oper23) - (24usize _oper24) - (25usize _oper25) - (26usize _oper26) - (27usize _oper27) - (28usize _oper28) - (29usize _oper29) - (30usize _oper30) - (31usize _oper31) - ) - () - ) - }}; - - // Count the 
listed cases. - (@count ()) => { - 0 - }; - (@count ($oper:ident $args:tt -> $res:pat => $body:tt, $($cases:tt)*)) => { - 1 + $crate::crossbeam_channel_internal!(@count ($($cases)*)) - }; - - // Run blocking selection. - (@add - $sel:ident - () - () - $labels:tt - $cases:tt - ) => {{ - let _oper: $crate::SelectedOperation<'_> = { - let _oper = $crate::internal::select(&mut $sel, _IS_BIASED); - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - unsafe { ::std::mem::transmute(_oper) } - }; - - $crate::crossbeam_channel_internal! { - @complete - $sel - _oper - $cases - } - }}; - // Run non-blocking selection. - (@add - $sel:ident - () - (default() => $body:tt,) - $labels:tt - $cases:tt - ) => {{ - let _oper: ::std::option::Option<$crate::SelectedOperation<'_>> = { - let _oper = $crate::internal::try_select(&mut $sel, _IS_BIASED); - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - unsafe { ::std::mem::transmute(_oper) } - }; - - match _oper { - None => { - { $sel }; - $body - } - Some(_oper) => { - $crate::crossbeam_channel_internal! { - @complete - $sel - _oper - $cases - } - } - } - }}; - // Run selection with a timeout. - (@add - $sel:ident - () - (default($timeout:expr) => $body:tt,) - $labels:tt - $cases:tt - ) => {{ - let _oper: ::std::option::Option<$crate::SelectedOperation<'_>> = { - let _oper = $crate::internal::select_timeout(&mut $sel, $timeout, _IS_BIASED); - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - unsafe { ::std::mem::transmute(_oper) } - }; - - match _oper { - ::std::option::Option::None => { - { $sel }; - $body - } - ::std::option::Option::Some(_oper) => { - $crate::crossbeam_channel_internal! { - @complete - $sel - _oper - $cases - } - } - } - }}; - // Have we used up all labels? - (@add - $sel:ident - $input:tt - $default:tt - () - $cases:tt - ) => { - compile_error!("too many operations in a `select!` block") - }; - // Add a receive operation to `sel`. 
- (@add - $sel:ident - (recv($r:expr) -> $res:pat => $body:tt, $($tail:tt)*) - $default:tt - (($i:tt $var:ident) $($labels:tt)*) - ($($cases:tt)*) - ) => {{ - match $r { - ref _r => { - let $var: &$crate::Receiver<_> = unsafe { - let _r: &$crate::Receiver<_> = _r; - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - unsafe fn unbind<'a, T>(x: &T) -> &'a T { - ::std::mem::transmute(x) - } - unbind(_r) - }; - $sel[$i] = ($var, $i, $var as *const $crate::Receiver<_> as *const u8); - - $crate::crossbeam_channel_internal!( - @add - $sel - ($($tail)*) - $default - ($($labels)*) - ($($cases)* [$i] recv($var) -> $res => $body,) - ) - } - } - }}; - // Add a send operation to `sel`. - (@add - $sel:ident - (send($s:expr, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) - $default:tt - (($i:tt $var:ident) $($labels:tt)*) - ($($cases:tt)*) - ) => {{ - match $s { - ref _s => { - let $var: &$crate::Sender<_> = unsafe { - let _s: &$crate::Sender<_> = _s; - - // Erase the lifetime so that `sel` can be dropped early even without NLL. - unsafe fn unbind<'a, T>(x: &T) -> &'a T { - ::std::mem::transmute(x) - } - unbind(_s) - }; - $sel[$i] = ($var, $i, $var as *const $crate::Sender<_> as *const u8); - - $crate::crossbeam_channel_internal!( - @add - $sel - ($($tail)*) - $default - ($($labels)*) - ($($cases)* [$i] send($var, $m) -> $res => $body,) - ) - } - } - }}; - - // Complete a receive operation. - (@complete - $sel:ident - $oper:ident - ([$i:tt] recv($r:ident) -> $res:pat => $body:tt, $($tail:tt)*) - ) => {{ - if $oper.index() == $i { - let _res = $oper.recv($r); - { $sel }; - - let $res = _res; - $body - } else { - $crate::crossbeam_channel_internal! { - @complete - $sel - $oper - ($($tail)*) - } - } - }}; - // Complete a send operation. 
- (@complete - $sel:ident - $oper:ident - ([$i:tt] send($s:ident, $m:expr) -> $res:pat => $body:tt, $($tail:tt)*) - ) => {{ - if $oper.index() == $i { - let _res = $oper.send($s, $m); - { $sel }; - - let $res = _res; - $body - } else { - $crate::crossbeam_channel_internal! { - @complete - $sel - $oper - ($($tail)*) - } - } - }}; - // Panic if we don't identify the selected case, but this should never happen. - (@complete - $sel:ident - $oper:ident - () - ) => {{ - unreachable!( - "internal error in crossbeam-channel: invalid case" - ) - }}; - - // Catches a bug within this macro (should not happen). - (@$($tokens:tt)*) => { - compile_error!( - concat!( - "internal error in crossbeam-channel: ", - stringify!(@$($tokens)*), - ) - ) - }; - - // The entry points. - () => { - compile_error!("empty `select!` block") - }; - ($($case:ident $(($($args:tt)*))* => $body:expr $(,)*)*) => { - $crate::crossbeam_channel_internal!( - @list - ($($case $(($($args)*))* => { $body },)*) - () - ) - }; - ($($tokens:tt)*) => { - $crate::crossbeam_channel_internal!( - @list - ($($tokens)*) - () - ) - }; -} - -/// Selects from a set of channel operations. -/// -/// This macro allows you to define a set of channel operations, wait until any one of them becomes -/// ready, and finally execute it. If multiple operations are ready at the same time, a random one -/// among them is selected (i.e. the unbiased selection). Use `select_biased!` for the biased -/// selection. -/// -/// It is also possible to define a `default` case that gets executed if none of the operations are -/// ready, either right away or for a certain duration of time. -/// -/// An operation is considered to be ready if it doesn't have to block. Note that it is ready even -/// when it will simply return an error because the channel is disconnected. -/// -/// The `select!` macro is a convenience wrapper around [`Select`]. However, it cannot select over a -/// dynamically created list of channel operations. 
-/// -/// [`Select`]: super::Select -/// -/// # Examples -/// -/// Block until a send or a receive operation is selected: -/// -/// ``` -/// use crossbeam_channel::{select, unbounded}; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// s1.send(10).unwrap(); -/// -/// // Since both operations are initially ready, a random one will be executed. -/// select! { -/// recv(r1) -> msg => assert_eq!(msg, Ok(10)), -/// send(s2, 20) -> res => { -/// assert_eq!(res, Ok(())); -/// assert_eq!(r2.recv(), Ok(20)); -/// } -/// } -/// ``` -/// -/// Select from a set of operations without blocking: -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::{select, unbounded}; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// -/// # let t1 = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s1.send(10).unwrap(); -/// }); -/// # let t2 = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_millis(500)); -/// s2.send(20).unwrap(); -/// }); -/// -/// // None of the operations are initially ready. -/// select! 
{ -/// recv(r1) -> msg => panic!(), -/// recv(r2) -> msg => panic!(), -/// default => println!("not ready"), -/// } -/// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// ``` -/// -/// Select over a set of operations with a timeout: -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::{select, unbounded}; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// -/// # let t1 = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s1.send(10).unwrap(); -/// }); -/// # let t2 = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_millis(500)); -/// s2.send(20).unwrap(); -/// }); -/// -/// // None of the two operations will become ready within 100 milliseconds. -/// select! { -/// recv(r1) -> msg => panic!(), -/// recv(r2) -> msg => panic!(), -/// default(Duration::from_millis(100)) => println!("timed out"), -/// } -/// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// ``` -/// -/// Optionally add a receive operation to `select!` using [`never`]: -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_channel::{select, never, unbounded}; -/// -/// let (s1, r1) = unbounded(); -/// let (s2, r2) = unbounded(); -/// -/// # let t1 = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_secs(1)); -/// s1.send(10).unwrap(); -/// }); -/// # let t2 = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_millis(500)); -/// s2.send(20).unwrap(); -/// }); -/// -/// // This receiver can be a `Some` or a `None`. -/// let r2 = Some(&r2); -/// -/// // None of the two operations will become ready within 100 milliseconds. -/// select! 
{ -/// recv(r1) -> msg => panic!(), -/// recv(r2.unwrap_or(&never())) -> msg => assert_eq!(msg, Ok(20)), -/// } -/// # t1.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// # t2.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// ``` -/// -/// To optionally add a timeout to `select!`, see the [example] for [`never`]. -/// -/// [`never`]: super::never -/// [example]: super::never#examples -#[macro_export] -macro_rules! select { - ($($tokens:tt)*) => { - { - const _IS_BIASED: bool = false; - - $crate::crossbeam_channel_internal!( - $($tokens)* - ) - } - }; -} - -/// Selects from a set of channel operations. -/// -/// This macro allows you to define a list of channel operations, wait until any one of them -/// becomes ready, and finally execute it. If multiple operations are ready at the same time, the -/// operation nearest to the front of the list is always selected (i.e. the biased selection). Use -/// [`select!`] for the unbiased selection. -/// -/// Otherwise, this macro's functionality is identical to [`select!`]. Refer to it for the syntax. -#[macro_export] -macro_rules! select_biased { - ($($tokens:tt)*) => { - { - const _IS_BIASED: bool = true; - - $crate::crossbeam_channel_internal!( - $($tokens)* - ) - } - }; -} diff --git a/crossbeam-channel/src/utils.rs b/crossbeam-channel/src/utils.rs deleted file mode 100644 index a425d96f8..000000000 --- a/crossbeam-channel/src/utils.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Miscellaneous utilities. - -use std::cell::Cell; -use std::num::Wrapping; -use std::thread; -use std::time::{Duration, Instant}; - -/// Randomly shuffles a slice. -pub(crate) fn shuffle(v: &mut [T]) { - let len = v.len(); - if len <= 1 { - return; - } - - std::thread_local! { - static RNG: Cell> = const { Cell::new(Wrapping(1_406_868_647)) }; - } - - let _ = RNG.try_with(|rng| { - for i in 1..len { - // This is the 32-bit variant of Xorshift. 
- // - // Source: https://en.wikipedia.org/wiki/Xorshift - let mut x = rng.get(); - x ^= x << 13; - x ^= x >> 17; - x ^= x << 5; - rng.set(x); - - let x = x.0; - let n = i + 1; - - // This is a fast alternative to `let j = x % n`. - // - // Author: Daniel Lemire - // Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize; - - v.swap(i, j); - } - }); -} - -/// Sleeps until the deadline, or forever if the deadline isn't specified. -pub(crate) fn sleep_until(deadline: Option) { - loop { - match deadline { - None => thread::sleep(Duration::from_secs(1000)), - Some(d) => { - let now = Instant::now(); - if now >= d { - break; - } - thread::sleep(d - now); - } - } - } -} diff --git a/crossbeam-channel/src/waker.rs b/crossbeam-channel/src/waker.rs deleted file mode 100644 index 7a88c8fdc..000000000 --- a/crossbeam-channel/src/waker.rs +++ /dev/null @@ -1,287 +0,0 @@ -//! Waking mechanism for threads blocked on channel operations. - -use std::ptr; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Mutex; -use std::thread::{self, ThreadId}; -use std::vec::Vec; - -use crate::context::Context; -use crate::select::{Operation, Selected}; - -/// Represents a thread blocked on a specific channel operation. -pub(crate) struct Entry { - /// The operation. - pub(crate) oper: Operation, - - /// Optional packet. - pub(crate) packet: *mut (), - - /// Context associated with the thread owning this operation. - pub(crate) cx: Context, -} - -/// A queue of threads blocked on channel operations. -/// -/// This data structure is used by threads to register blocking operations and get woken up once -/// an operation becomes ready. -pub(crate) struct Waker { - /// A list of select operations. - selectors: Vec, - - /// A list of operations waiting to be ready. - observers: Vec, -} - -impl Waker { - /// Creates a new `Waker`. 
- #[inline] - pub(crate) fn new() -> Self { - Self { - selectors: Vec::new(), - observers: Vec::new(), - } - } - - /// Registers a select operation. - #[inline] - pub(crate) fn register(&mut self, oper: Operation, cx: &Context) { - self.register_with_packet(oper, ptr::null_mut(), cx); - } - - /// Registers a select operation and a packet. - #[inline] - pub(crate) fn register_with_packet(&mut self, oper: Operation, packet: *mut (), cx: &Context) { - self.selectors.push(Entry { - oper, - packet, - cx: cx.clone(), - }); - } - - /// Unregisters a select operation. - #[inline] - pub(crate) fn unregister(&mut self, oper: Operation) -> Option { - if let Some((i, _)) = self - .selectors - .iter() - .enumerate() - .find(|&(_, entry)| entry.oper == oper) - { - let entry = self.selectors.remove(i); - Some(entry) - } else { - None - } - } - - /// Attempts to find another thread's entry, select the operation, and wake it up. - #[inline] - pub(crate) fn try_select(&mut self) -> Option { - if self.selectors.is_empty() { - None - } else { - let thread_id = current_thread_id(); - - self.selectors - .iter() - .position(|selector| { - // Does the entry belong to a different thread? - selector.cx.thread_id() != thread_id - && selector // Try selecting this operation. - .cx - .try_select(Selected::Operation(selector.oper)) - .is_ok() - && { - // Provide the packet. - selector.cx.store_packet(selector.packet); - // Wake the thread up. - selector.cx.unpark(); - true - } - }) - // Remove the entry from the queue to keep it clean and improve - // performance. - .map(|pos| self.selectors.remove(pos)) - } - } - - /// Returns `true` if there is an entry which can be selected by the current thread. 
- #[inline] - pub(crate) fn can_select(&self) -> bool { - if self.selectors.is_empty() { - false - } else { - let thread_id = current_thread_id(); - - self.selectors.iter().any(|entry| { - entry.cx.thread_id() != thread_id && entry.cx.selected() == Selected::Waiting - }) - } - } - - /// Registers an operation waiting to be ready. - #[inline] - pub(crate) fn watch(&mut self, oper: Operation, cx: &Context) { - self.observers.push(Entry { - oper, - packet: ptr::null_mut(), - cx: cx.clone(), - }); - } - - /// Unregisters an operation waiting to be ready. - #[inline] - pub(crate) fn unwatch(&mut self, oper: Operation) { - self.observers.retain(|e| e.oper != oper); - } - - /// Notifies all operations waiting to be ready. - #[inline] - pub(crate) fn notify(&mut self) { - for entry in self.observers.drain(..) { - if entry.cx.try_select(Selected::Operation(entry.oper)).is_ok() { - entry.cx.unpark(); - } - } - } - - /// Notifies all registered operations that the channel is disconnected. - #[inline] - pub(crate) fn disconnect(&mut self) { - for entry in self.selectors.iter() { - if entry.cx.try_select(Selected::Disconnected).is_ok() { - // Wake the thread up. - // - // Here we don't remove the entry from the queue. Registered threads must - // unregister from the waker by themselves. They might also want to recover the - // packet value and destroy it, if necessary. - entry.cx.unpark(); - } - } - - self.notify(); - } -} - -impl Drop for Waker { - #[inline] - fn drop(&mut self) { - debug_assert_eq!(self.selectors.len(), 0); - debug_assert_eq!(self.observers.len(), 0); - } -} - -/// A waker that can be shared among threads without locking. -/// -/// This is a simple wrapper around `Waker` that internally uses a mutex for synchronization. -pub(crate) struct SyncWaker { - /// The inner `Waker`. - inner: Mutex, - - /// `true` if the waker is empty. - is_empty: AtomicBool, -} - -impl SyncWaker { - /// Creates a new `SyncWaker`. 
- #[inline] - pub(crate) fn new() -> Self { - Self { - inner: Mutex::new(Waker::new()), - is_empty: AtomicBool::new(true), - } - } - - /// Registers the current thread with an operation. - #[inline] - pub(crate) fn register(&self, oper: Operation, cx: &Context) { - let mut inner = self.inner.lock().unwrap(); - inner.register(oper, cx); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - - /// Unregisters an operation previously registered by the current thread. - #[inline] - pub(crate) fn unregister(&self, oper: Operation) -> Option { - let mut inner = self.inner.lock().unwrap(); - let entry = inner.unregister(oper); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - entry - } - - /// Attempts to find one thread (not the current one), select its operation, and wake it up. - #[inline] - pub(crate) fn notify(&self) { - if !self.is_empty.load(Ordering::SeqCst) { - let mut inner = self.inner.lock().unwrap(); - if !self.is_empty.load(Ordering::SeqCst) { - inner.try_select(); - inner.notify(); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - } - } - - /// Registers an operation waiting to be ready. - #[inline] - pub(crate) fn watch(&self, oper: Operation, cx: &Context) { - let mut inner = self.inner.lock().unwrap(); - inner.watch(oper, cx); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - - /// Unregisters an operation waiting to be ready. - #[inline] - pub(crate) fn unwatch(&self, oper: Operation) { - let mut inner = self.inner.lock().unwrap(); - inner.unwatch(oper); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } - - /// Notifies all threads that the channel is disconnected. 
- #[inline] - pub(crate) fn disconnect(&self) { - let mut inner = self.inner.lock().unwrap(); - inner.disconnect(); - self.is_empty.store( - inner.selectors.is_empty() && inner.observers.is_empty(), - Ordering::SeqCst, - ); - } -} - -impl Drop for SyncWaker { - #[inline] - fn drop(&mut self) { - debug_assert!(self.is_empty.load(Ordering::SeqCst)); - } -} - -/// Returns the id of the current thread. -#[inline] -fn current_thread_id() -> ThreadId { - std::thread_local! { - /// Cached thread-local id. - static THREAD_ID: ThreadId = thread::current().id(); - } - - THREAD_ID - .try_with(|id| *id) - .unwrap_or_else(|_| thread::current().id()) -} diff --git a/crossbeam-channel/tests/after.rs b/crossbeam-channel/tests/after.rs deleted file mode 100644 index 678a8c679..000000000 --- a/crossbeam-channel/tests/after.rs +++ /dev/null @@ -1,336 +0,0 @@ -//! Tests for the after channel flavor. - -#![cfg(not(miri))] // TODO: many assertions failed due to Miri is slow - -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, select, Select, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn fire() { - let start = Instant::now(); - let r = after(ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(100)); - - let fired = r.try_recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired >= ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - - select! { - recv(r) -> _ => panic!(), - default => {} - } - - select! 
{ - recv(r) -> _ => panic!(), - recv(after(ms(200))) -> _ => {} - } -} - -#[test] -fn capacity() { - const COUNT: usize = 10; - - for i in 0..COUNT { - let r = after(ms(i as u64)); - assert_eq!(r.capacity(), Some(1)); - } -} - -#[test] -fn len_empty_full() { - let r = after(ms(50)); - - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(!r.is_full()); - - thread::sleep(ms(100)); - - assert_eq!(r.len(), 1); - assert!(!r.is_empty()); - assert!(r.is_full()); - - r.try_recv().unwrap(); - - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(!r.is_full()); -} - -#[test] -fn try_recv() { - let r = after(ms(200)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(100)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_ok()); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_err()); -} - -#[test] -fn recv() { - let start = Instant::now(); - let r = after(ms(50)); - - let fired = r.recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired < fired - start); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn recv_timeout() { - let start = Instant::now(); - let r = after(ms(200)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(100)); - assert!(now - start <= ms(150)); - - let fired = r.recv_timeout(ms(200)).unwrap(); - assert!(fired - start >= ms(200)); - assert!(fired - start <= ms(250)); - - assert!(r.recv_timeout(ms(200)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(400)); - assert!(now - start <= ms(450)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn recv_two() { - let r1 = after(ms(50)); - let r2 = after(ms(50)); - - scope(|scope| { - scope.spawn(|_| { - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - }); - scope.spawn(|_| { - select! 
{ - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - }); - }) - .unwrap(); -} - -#[test] -fn recv_race() { - select! { - recv(after(ms(50))) -> _ => {} - recv(after(ms(100))) -> _ => panic!(), - } - - select! { - recv(after(ms(100))) -> _ => panic!(), - recv(after(ms(50))) -> _ => {} - } -} - -#[test] -fn stress_default() { - const COUNT: usize = 10; - - for _ in 0..COUNT { - select! { - recv(after(ms(0))) -> _ => {} - default => panic!(), - } - } - - for _ in 0..COUNT { - select! { - recv(after(ms(100))) -> _ => panic!(), - default => {} - } - } -} - -#[test] -fn select() { - const THREADS: usize = 4; - const COUNT: usize = 1000; - const TIMEOUT_MS: u64 = 100; - - let v = (0..COUNT) - .map(|i| after(ms(i as u64 / TIMEOUT_MS / 2))) - .collect::>(); - let hits = AtomicUsize::new(0); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let v: Vec<&_> = v.iter().collect(); - - loop { - let timeout = after(ms(TIMEOUT_MS)); - let mut sel = Select::new(); - for r in &v { - sel.recv(r); - } - let oper_timeout = sel.recv(&timeout); - - let oper = sel.select(); - match oper.index() { - i if i == oper_timeout => { - oper.recv(&timeout).unwrap(); - break; - } - i => { - oper.recv(v[i]).unwrap(); - hits.fetch_add(1, Ordering::SeqCst); - } - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), COUNT); -} - -#[test] -fn ready() { - const THREADS: usize = 4; - const COUNT: usize = 1000; - const TIMEOUT_MS: u64 = 100; - - let v = (0..COUNT) - .map(|i| after(ms(i as u64 / TIMEOUT_MS / 2))) - .collect::>(); - let hits = AtomicUsize::new(0); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let v: Vec<&_> = v.iter().collect(); - - loop { - let timeout = after(ms(TIMEOUT_MS)); - let mut sel = Select::new(); - for r in &v { - sel.recv(r); - } - let oper_timeout = sel.recv(&timeout); - - loop { - let i = sel.ready(); - if i == oper_timeout { - timeout.try_recv().unwrap(); - return; - } else if v[i].try_recv().is_ok() { - 
hits.fetch_add(1, Ordering::SeqCst); - break; - } - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), COUNT); -} - -#[test] -fn stress_clone() { - const RUNS: usize = 1000; - const THREADS: usize = 10; - const COUNT: usize = 50; - - for i in 0..RUNS { - let r = after(ms(i as u64)); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let r = r.clone(); - let _ = r.try_recv(); - - for _ in 0..COUNT { - drop(r.clone()); - thread::yield_now(); - } - }); - } - }) - .unwrap(); - } -} - -#[test] -fn fairness() { - const COUNT: usize = 1000; - - for &dur in &[0, 1] { - let mut hits = [0usize; 2]; - - for _ in 0..COUNT { - select! { - recv(after(ms(dur))) -> _ => hits[0] += 1, - recv(after(ms(dur))) -> _ => hits[1] += 1, - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} - -#[test] -fn fairness_duplicates() { - const COUNT: usize = 1000; - - for &dur in &[0, 1] { - let mut hits = [0usize; 5]; - - for _ in 0..COUNT { - let r = after(ms(dur)); - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} diff --git a/crossbeam-channel/tests/array.rs b/crossbeam-channel/tests/array.rs deleted file mode 100644 index 486f56a78..000000000 --- a/crossbeam-channel/tests/array.rs +++ /dev/null @@ -1,744 +0,0 @@ -//! Tests for the array channel flavor. 
- -use std::any::Any; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{bounded, select, Receiver}; -use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; -use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; -use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - let (s, r) = bounded(1); - s.send(7).unwrap(); - assert_eq!(r.try_recv(), Ok(7)); - - s.send(8).unwrap(); - assert_eq!(r.recv(), Ok(8)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); -} - -#[test] -fn capacity() { - for i in 1..10 { - let (s, r) = bounded::<()>(i); - assert_eq!(s.capacity(), Some(i)); - assert_eq!(r.capacity(), Some(i)); - } -} - -#[test] -fn len_empty_full() { - let (s, r) = bounded(2); - - assert_eq!(s.len(), 0); - assert!(s.is_empty()); - assert!(!s.is_full()); - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(!r.is_full()); - - s.send(()).unwrap(); - - assert_eq!(s.len(), 1); - assert!(!s.is_empty()); - assert!(!s.is_full()); - assert_eq!(r.len(), 1); - assert!(!r.is_empty()); - assert!(!r.is_full()); - - s.send(()).unwrap(); - - assert_eq!(s.len(), 2); - assert!(!s.is_empty()); - assert!(s.is_full()); - assert_eq!(r.len(), 2); - assert!(!r.is_empty()); - assert!(r.is_full()); - - r.recv().unwrap(); - - assert_eq!(s.len(), 1); - assert!(!s.is_empty()); - assert!(!s.is_full()); - assert_eq!(r.len(), 1); - assert!(!r.is_empty()); - assert!(!r.is_full()); -} - -#[test] -fn try_recv() { - let (s, r) = bounded(100); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(1500)); - assert_eq!(r.try_recv(), Ok(7)); - thread::sleep(ms(500)); - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); - }); - 
scope.spawn(move |_| { - thread::sleep(ms(1000)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = bounded(100); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Ok(7)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(9)); - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - s.send(8).unwrap(); - s.send(9).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = bounded::(100); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); - assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); - assert_eq!( - r.recv_timeout(ms(1000)), - Err(RecvTimeoutError::Disconnected) - ); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.try_send(1), Ok(())); - assert_eq!(s.try_send(2), Err(TrySendError::Full(2))); - thread::sleep(ms(1500)); - assert_eq!(s.try_send(3), Ok(())); - thread::sleep(ms(500)); - assert_eq!(s.try_send(4), Err(TrySendError::Disconnected(4))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - assert_eq!(r.try_recv(), Ok(1)); - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(r.recv(), Ok(3)); - }); - }) - .unwrap(); -} - -#[test] -fn send() { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(|_| { - s.send(7).unwrap(); - thread::sleep(ms(1000)); - s.send(8).unwrap(); - thread::sleep(ms(1000)); - s.send(9).unwrap(); - thread::sleep(ms(1000)); - s.send(10).unwrap(); - }); - scope.spawn(|_| { - thread::sleep(ms(1500)); - assert_eq!(r.recv(), Ok(7)); - assert_eq!(r.recv(), Ok(8)); - assert_eq!(r.recv(), Ok(9)); - }); - }) - .unwrap(); -} - -#[test] -fn send_timeout() { - let (s, r) = 
bounded(2); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.send_timeout(1, ms(1000)), Ok(())); - assert_eq!(s.send_timeout(2, ms(1000)), Ok(())); - assert_eq!( - s.send_timeout(3, ms(500)), - Err(SendTimeoutError::Timeout(3)) - ); - thread::sleep(ms(1000)); - assert_eq!(s.send_timeout(4, ms(1000)), Ok(())); - thread::sleep(ms(1000)); - assert_eq!(s.send(5), Err(SendError(5))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(1)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(2)); - assert_eq!(r.recv(), Ok(4)); - }); - }) - .unwrap(); -} - -#[test] -fn send_after_disconnect() { - let (s, r) = bounded(100); - - s.send(1).unwrap(); - s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(r); - - assert_eq!(s.send(4), Err(SendError(4))); - assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5))); - assert_eq!( - s.send_timeout(6, ms(500)), - Err(SendTimeoutError::Disconnected(6)) - ); -} - -#[test] -fn recv_after_disconnect() { - let (s, r) = bounded(100); - - s.send(1).unwrap(); - s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(s); - - assert_eq!(r.recv(), Ok(1)); - assert_eq!(r.recv(), Ok(2)); - assert_eq!(r.recv(), Ok(3)); - assert_eq!(r.recv(), Err(RecvError)); -} - -#[test] -fn len() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - #[cfg(miri)] - const CAP: usize = 50; - #[cfg(not(miri))] - const CAP: usize = 1000; - - let (s, r) = bounded(CAP); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - for _ in 0..CAP / 10 { - for i in 0..50 { - s.send(i).unwrap(); - assert_eq!(s.len(), i + 1); - } - - for i in 0..50 { - r.recv().unwrap(); - assert_eq!(r.len(), 50 - i - 1); - } - } - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - for i in 0..CAP { - s.send(i).unwrap(); - assert_eq!(s.len(), i + 1); - } - - for _ in 0..CAP { - r.recv().unwrap(); - } - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - scope(|scope| { - scope.spawn(|_| { - for i 
in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - let len = r.len(); - assert!(len <= CAP); - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - let len = s.len(); - assert!(len <= CAP); - } - }); - }) - .unwrap(); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); -} - -#[test] -fn disconnect_wakes_sender() { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.send(()), Ok(())); - assert_eq!(s.send(()), Err(SendError(()))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(r); - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = bounded::<()>(1); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let (s, r) = bounded(3); - - scope(|scope| { - scope.spawn(move |_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - } - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - }) - .unwrap(); -} - -#[test] -fn mpmc() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = bounded::(3); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = r.recv().unwrap(); - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn stress_oneshot() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - for _ in 0..COUNT { - let 
(s, r) = bounded(1); - - scope(|scope| { - scope.spawn(|_| r.recv().unwrap()); - scope.spawn(|_| s.send(0).unwrap()); - }) - .unwrap(); - } -} - -#[test] -fn stress_iter() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let (request_s, request_r) = bounded(1); - let (response_s, response_r) = bounded(1); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - loop { - for x in response_r.try_iter() { - count += x; - if count == COUNT { - return; - } - } - request_s.send(()).unwrap(); - } - }); - - for _ in request_r.iter() { - if response_s.send(1).is_err() { - break; - } - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 100; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(()) = s.send_timeout(i, ms(10)) { - break; - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(x) = r.recv_timeout(ms(10)) { - assert_eq!(x, i); - break; - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn drops() { - #[cfg(miri)] - const RUNS: usize = 10; - #[cfg(not(miri))] - const RUNS: usize = 100; - #[cfg(miri)] - const STEPS: usize = 100; - #[cfg(not(miri))] - const STEPS: usize = 10_000; - - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - let mut rng = thread_rng(); - - for _ in 0..RUNS { - let steps = rng.gen_range(0..STEPS); - let additional = rng.gen_range(0..50); - - DROPS.store(0, Ordering::SeqCst); - let (s, r) = bounded::(50); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - r.recv().unwrap(); - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - s.send(DropCounter).unwrap(); - } - }); - }) - .unwrap(); - - for _ 
in 0..additional { - s.send(DropCounter).unwrap(); - } - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(s); - drop(r); - assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); - } -} - -#[test] -fn linearizable() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = bounded(THREADS); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - s.send(0).unwrap(); - r.try_recv().unwrap(); - } - }); - } - }) - .unwrap(); -} - -#[test] -fn fairness() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = bounded::<()>(COUNT); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness_duplicates() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s, r) = bounded::<()>(COUNT); - - for _ in 0..COUNT { - s.send(()).unwrap(); - } - - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn recv_in_send() { - let (s, _r) = bounded(1); - s.send(()).unwrap(); - - #[allow(unreachable_code, clippy::diverging_sub_expression)] - { - select! { - send(s, panic!()) -> _ => panic!(), - default => {} - } - } - - let (s, r) = bounded(2); - s.send(()).unwrap(); - - select! 
{ - send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {} - } -} - -#[test] -fn channel_through_channel() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - type T = Box; - - let (s, r) = bounded::(1); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(1); - let new_r: T = Box::new(Some(new_r)); - - s.send(new_r).unwrap(); - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = r - .recv() - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - }); - }) - .unwrap(); -} - -#[test] -fn panic_on_drop() { - struct Msg1<'a>(&'a mut bool); - impl Drop for Msg1<'_> { - fn drop(&mut self) { - if *self.0 && !std::thread::panicking() { - panic!("double drop"); - } else { - *self.0 = true; - } - } - } - - struct Msg2<'a>(&'a mut bool); - impl Drop for Msg2<'_> { - fn drop(&mut self) { - if *self.0 { - panic!("double drop"); - } else { - *self.0 = true; - panic!("first drop"); - } - } - } - - // normal - let (s, r) = bounded(2); - let (mut a, mut b) = (false, false); - s.send(Msg1(&mut a)).unwrap(); - s.send(Msg1(&mut b)).unwrap(); - drop(s); - drop(r); - assert!(a); - assert!(b); - - // panic on drop - let (s, r) = bounded(2); - let (mut a, mut b) = (false, false); - s.send(Msg2(&mut a)).unwrap(); - s.send(Msg2(&mut b)).unwrap(); - drop(s); - let res = std::panic::catch_unwind(move || { - drop(r); - }); - assert_eq!( - *res.unwrap_err().downcast_ref::<&str>().unwrap(), - "first drop" - ); - assert!(a); - // Elements after the panicked element will leak. - assert!(!b); -} diff --git a/crossbeam-channel/tests/golang.rs b/crossbeam-channel/tests/golang.rs deleted file mode 100644 index f63b48171..000000000 --- a/crossbeam-channel/tests/golang.rs +++ /dev/null @@ -1,2152 +0,0 @@ -//! Tests copied from Go and manually rewritten in Rust. -//! -//! Source: -//! - https://github.com/golang/go -//! -//! 
Copyright & License: -//! - Copyright (c) 2009 The Go Authors -//! - https://golang.org/AUTHORS -//! - https://golang.org/LICENSE -//! - https://golang.org/PATENTS - -#![allow(clippy::redundant_clone)] - -use std::alloc::{GlobalAlloc, Layout, System}; -use std::any::Any; -use std::cell::Cell; -use std::collections::HashMap; -use std::sync::atomic::{AtomicI32, AtomicUsize, Ordering::SeqCst}; -use std::sync::{Arc, Condvar, Mutex}; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{bounded, never, select, tick, unbounded, Receiver, Select, Sender}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -struct Chan { - inner: Arc>>, -} - -struct ChanInner { - s: Option>, - r: Option>, - // Receiver to use when r is None (Go blocks on receiving from nil) - nil_r: Receiver, - // Sender to use when s is None (Go blocks on sending to nil) - nil_s: Sender, - // Hold this receiver to prevent nil sender channel from disconnection - _nil_sr: Receiver, -} - -impl Clone for Chan { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -impl Chan { - fn send(&self, msg: T) { - let s = self - .inner - .lock() - .unwrap() - .s - .as_ref() - .expect("sending into closed channel") - .clone(); - let _ = s.send(msg); - } - - fn try_recv(&self) -> Option { - let r = self.inner.lock().unwrap().r.as_ref().unwrap().clone(); - r.try_recv().ok() - } - - fn recv(&self) -> Option { - let r = self.inner.lock().unwrap().r.as_ref().unwrap().clone(); - r.recv().ok() - } - - fn close_s(&self) { - self.inner - .lock() - .unwrap() - .s - .take() - .expect("channel sender already closed"); - } - - fn close_r(&self) { - self.inner - .lock() - .unwrap() - .r - .take() - .expect("channel receiver already closed"); - } - - fn has_rx(&self) -> bool { - self.inner.lock().unwrap().r.is_some() - } - - fn has_tx(&self) -> bool { - self.inner.lock().unwrap().s.is_some() - } - - fn rx(&self) -> Receiver { - let inner = self.inner.lock().unwrap(); - match 
inner.r.as_ref() { - None => inner.nil_r.clone(), - Some(r) => r.clone(), - } - } - - fn tx(&self) -> Sender { - let inner = self.inner.lock().unwrap(); - match inner.s.as_ref() { - None => inner.nil_s.clone(), - Some(s) => s.clone(), - } - } -} - -impl Iterator for Chan { - type Item = T; - - fn next(&mut self) -> Option { - self.recv() - } -} - -impl IntoIterator for &Chan { - type Item = T; - type IntoIter = Chan; - - fn into_iter(self) -> Self::IntoIter { - self.clone() - } -} - -fn make(cap: usize) -> Chan { - let (s, r) = bounded(cap); - let (nil_s, _nil_sr) = bounded(0); - Chan { - inner: Arc::new(Mutex::new(ChanInner { - s: Some(s), - r: Some(r), - nil_r: never(), - nil_s, - _nil_sr, - })), - } -} - -fn make_unbounded() -> Chan { - let (s, r) = unbounded(); - let (nil_s, _nil_sr) = bounded(0); - Chan { - inner: Arc::new(Mutex::new(ChanInner { - s: Some(s), - r: Some(r), - nil_r: never(), - nil_s, - _nil_sr, - })), - } -} - -#[derive(Clone)] -struct WaitGroup(Arc); - -struct WaitGroupInner { - cond: Condvar, - count: Mutex, -} - -impl WaitGroup { - fn new() -> Self { - Self(Arc::new(WaitGroupInner { - cond: Condvar::new(), - count: Mutex::new(0), - })) - } - - fn add(&self, delta: i32) { - let mut count = self.0.count.lock().unwrap(); - *count += delta; - assert!(*count >= 0); - self.0.cond.notify_all(); - } - - fn done(&self) { - self.add(-1); - } - - fn wait(&self) { - let mut count = self.0.count.lock().unwrap(); - while *count > 0 { - count = self.0.cond.wait(count).unwrap(); - } - } -} - -struct Defer { - f: Option>, -} - -impl Drop for Defer { - fn drop(&mut self) { - let f = self.f.take().unwrap(); - let mut f = Some(f); - let mut f = move || f.take().unwrap()(); - f(); - } -} - -struct Counter; - -static ALLOCATED: AtomicUsize = AtomicUsize::new(0); -unsafe impl GlobalAlloc for Counter { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - let ret = System.alloc(layout); - if !ret.is_null() { - ALLOCATED.fetch_add(layout.size(), SeqCst); - } - ret 
- } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - System.dealloc(ptr, layout); - ALLOCATED.fetch_sub(layout.size(), SeqCst); - } -} - -#[global_allocator] -static A: Counter = Counter; - -macro_rules! defer { - ($body:expr) => { - let _defer = Defer { - f: Some(Box::new(|| $body)), - }; - }; -} - -macro_rules! go { - (@parse $v:ident, $($tail:tt)*) => {{ - let $v = $v.clone(); - go!(@parse $($tail)*) - }}; - (@parse $body:expr) => { - ::std::thread::spawn(move || { - let res = ::std::panic::catch_unwind(::std::panic::AssertUnwindSafe(|| { - $body - })); - if res.is_err() { - eprintln!("goroutine panicked: {:?}", res); - ::std::process::abort(); - } - }) - }; - (@parse $($tail:tt)*) => { - compile_error!("invalid `go!` syntax") - }; - ($($tail:tt)*) => {{ - go!(@parse $($tail)*) - }}; -} - -// https://github.com/golang/go/blob/master/test/chan/doubleselect.go -mod doubleselect { - use super::*; - - #[cfg(miri)] - const ITERATIONS: i32 = 100; - #[cfg(not(miri))] - const ITERATIONS: i32 = 10_000; - - fn sender(n: i32, c1: Chan, c2: Chan, c3: Chan, c4: Chan) { - defer! { c1.close_s() } - defer! { c2.close_s() } - defer! { c3.close_s() } - defer! { c4.close_s() } - - for i in 0..n { - select! 
{ - send(c1.tx(), i) -> _ => {} - send(c2.tx(), i) -> _ => {} - send(c3.tx(), i) -> _ => {} - send(c4.tx(), i) -> _ => {} - } - } - } - - fn mux(out: Chan, inp: Chan, done: Chan) { - for v in inp { - out.send(v); - } - done.send(true); - } - - fn recver(inp: Chan) { - let mut seen = HashMap::new(); - - for v in &inp { - if seen.contains_key(&v) { - panic!("got duplicate value for {}", v); - } - seen.insert(v, true); - } - } - - #[test] - fn main() { - let c1 = make::(0); - let c2 = make::(0); - let c3 = make::(0); - let c4 = make::(0); - let done = make::(0); - let cmux = make::(0); - - go!(c1, c2, c3, c4, sender(ITERATIONS, c1, c2, c3, c4)); - go!(cmux, c1, done, mux(cmux, c1, done)); - go!(cmux, c2, done, mux(cmux, c2, done)); - go!(cmux, c3, done, mux(cmux, c3, done)); - go!(cmux, c4, done, mux(cmux, c4, done)); - go!(done, cmux, { - done.recv(); - done.recv(); - done.recv(); - done.recv(); - cmux.close_s(); - }); - recver(cmux); - } -} - -// https://github.com/golang/go/blob/master/test/chan/fifo.go -mod fifo { - use super::*; - - const N: i32 = 10; - - #[test] - fn asynch_fifo() { - let ch = make::(N as usize); - for i in 0..N { - ch.send(i); - } - for i in 0..N { - if ch.recv() != Some(i) { - panic!("bad receive"); - } - } - } - - fn chain(ch: Chan, val: i32, inp: Chan, out: Chan) { - inp.recv(); - if ch.recv() != Some(val) { - panic!("{}", val); - } - out.send(1); - } - - #[test] - fn synch_fifo() { - let ch = make::(0); - let mut inp = make::(0); - let start = inp.clone(); - - for i in 0..N { - let out = make::(0); - go!(ch, i, inp, out, chain(ch, i, inp, out)); - inp = out; - } - - start.send(0); - for i in 0..N { - ch.send(i); - } - inp.recv(); - } -} - -// https://github.com/golang/go/blob/master/test/chan/goroutines.go -mod goroutines { - use super::*; - - fn f(left: Chan, right: Chan) { - left.send(right.recv().unwrap()); - } - - #[test] - fn main() { - let n = 100i32; - - let leftmost = make::(0); - let mut right = leftmost.clone(); - let mut left = 
leftmost.clone(); - - for _ in 0..n { - right = make::(0); - go!(left, right, f(left, right)); - left = right.clone(); - } - - go!(right, right.send(1)); - leftmost.recv().unwrap(); - } -} - -// https://github.com/golang/go/blob/master/test/chan/nonblock.go -mod nonblock { - use super::*; - - fn i32receiver(c: Chan, strobe: Chan) { - if c.recv().unwrap() != 123 { - panic!("i32 value"); - } - strobe.send(true); - } - - fn i32sender(c: Chan, strobe: Chan) { - c.send(234); - strobe.send(true); - } - - fn i64receiver(c: Chan, strobe: Chan) { - if c.recv().unwrap() != 123456 { - panic!("i64 value"); - } - strobe.send(true); - } - - fn i64sender(c: Chan, strobe: Chan) { - c.send(234567); - strobe.send(true); - } - - fn breceiver(c: Chan, strobe: Chan) { - if !c.recv().unwrap() { - panic!("b value"); - } - strobe.send(true); - } - - fn bsender(c: Chan, strobe: Chan) { - c.send(true); - strobe.send(true); - } - - fn sreceiver(c: Chan, strobe: Chan) { - if c.recv().unwrap() != "hello" { - panic!("x value"); - } - strobe.send(true); - } - - fn ssender(c: Chan, strobe: Chan) { - c.send("hello again".to_string()); - strobe.send(true); - } - - const MAX_TRIES: usize = 10000; // Up to 100ms per test. - - #[test] - fn main() { - let ticker = tick(Duration::new(0, 10_000)); // 10 us - let sleep = || { - ticker.recv().unwrap(); - ticker.recv().unwrap(); - thread::yield_now(); - thread::yield_now(); - thread::yield_now(); - }; - - let sync = make::(0); - - for buffer in 0..2 { - let c32 = make::(buffer); - let c64 = make::(buffer); - let cb = make::(buffer); - let cs = make::(buffer); - - select! { - recv(c32.rx()) -> _ => panic!("blocked i32sender"), - default => {} - } - - select! { - recv(c64.rx()) -> _ => panic!("blocked i64sender"), - default => {} - } - - select! { - recv(cb.rx()) -> _ => panic!("blocked bsender"), - default => {} - } - - select! 
{ - recv(cs.rx()) -> _ => panic!("blocked ssender"), - default => {} - } - - go!(c32, sync, i32receiver(c32, sync)); - let mut r#try = 0; - loop { - select! { - send(c32.tx(), 123) -> _ => break, - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("i32receiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(c32, sync, i32sender(c32, sync)); - if buffer > 0 { - sync.recv(); - } - let mut r#try = 0; - loop { - select! { - recv(c32.rx()) -> v => { - if v != Ok(234) { - panic!("i32sender value"); - } - break; - } - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("i32sender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - - go!(c64, sync, i64receiver(c64, sync)); - let mut r#try = 0; - loop { - select! { - send(c64.tx(), 123456) -> _ => break, - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("i64receiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(c64, sync, i64sender(c64, sync)); - if buffer > 0 { - sync.recv(); - } - let mut r#try = 0; - loop { - select! { - recv(c64.rx()) -> v => { - if v != Ok(234567) { - panic!("i64sender value"); - } - break; - } - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("i64sender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - - go!(cb, sync, breceiver(cb, sync)); - let mut r#try = 0; - loop { - select! { - send(cb.tx(), true) -> _ => break, - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("breceiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(cb, sync, bsender(cb, sync)); - if buffer > 0 { - sync.recv(); - } - let mut r#try = 0; - loop { - select! 
{ - recv(cb.rx()) -> v => { - if v != Ok(true) { - panic!("bsender value"); - } - break; - } - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("bsender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - - go!(cs, sync, sreceiver(cs, sync)); - let mut r#try = 0; - loop { - select! { - send(cs.tx(), "hello".to_string()) -> _ => break, - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("sreceiver buffer={}", buffer); - panic!("fail") - } - sleep(); - } - } - } - sync.recv(); - go!(cs, sync, ssender(cs, sync)); - if buffer > 0 { - sync.recv(); - } - let mut r#try = 0; - loop { - select! { - recv(cs.rx()) -> v => { - if v != Ok("hello again".to_string()) { - panic!("ssender value"); - } - break; - } - default => { - r#try += 1; - if r#try > MAX_TRIES { - println!("ssender buffer={}", buffer); - panic!("fail"); - } - sleep(); - } - } - } - if buffer == 0 { - sync.recv(); - } - } - } -} - -// https://github.com/golang/go/blob/master/test/chan/select.go -mod select { - use super::*; - - #[test] - fn main() { - let shift = Cell::new(0); - let counter = Cell::new(0); - - let get_value = || { - counter.set(counter.get() + 1); - 1 << shift.get() - }; - - let send = |mut a: Option<&Chan>, mut b: Option<&Chan>| { - let mut i = 0; - let never = make::(0); - loop { - let nil1 = never.tx(); - let nil2 = never.tx(); - let v1 = get_value(); - let v2 = get_value(); - select! 
{ - send(a.map(|c| c.tx()).unwrap_or(nil1), v1) -> _ => { - i += 1; - a = None; - } - send(b.map(|c| c.tx()).unwrap_or(nil2), v2) -> _ => { - i += 1; - b = None; - } - default => break, - } - shift.set(shift.get() + 1); - } - i - }; - - let a = make::(1); - let b = make::(1); - - assert_eq!(send(Some(&a), Some(&b)), 2); - - let av = a.recv().unwrap(); - let bv = b.recv().unwrap(); - assert_eq!(av | bv, 3); - - assert_eq!(send(Some(&a), None), 1); - assert_eq!(counter.get(), 10); - } -} - -// https://github.com/golang/go/blob/master/test/chan/select2.go -mod select2 { - use super::*; - - #[cfg(miri)] - const N: i32 = 200; - #[cfg(not(miri))] - const N: i32 = 100000; - - #[test] - fn main() { - fn sender(c: &Chan, n: i32) { - for _ in 0..n { - c.send(1); - } - } - - fn receiver(c: &Chan, dummy: &Chan, n: i32) { - for _ in 0..n { - select! { - recv(c.rx()) -> _ => {} - recv(dummy.rx()) -> _ => { - panic!("dummy"); - } - } - } - } - - let c = make_unbounded::(); - let dummy = make_unbounded::(); - - go!(c, sender(&c, N)); - receiver(&c, &dummy, N); - - let alloc = ALLOCATED.load(SeqCst); - - go!(c, sender(&c, N)); - receiver(&c, &dummy, N); - - let final_alloc = ALLOCATED.load(SeqCst); - - assert!(!(final_alloc > alloc && final_alloc - alloc > N as usize + 10000)); - } -} - -// https://github.com/golang/go/blob/master/test/chan/select3.go -mod select3 { - // TODO -} - -// https://github.com/golang/go/blob/master/test/chan/select4.go -mod select4 { - use super::*; - - #[test] - fn main() { - let c = make::(1); - let c1 = make::(0); - c.send(42); - select! { - recv(c1.rx()) -> _ => panic!("BUG"), - recv(c.rx()) -> v => assert_eq!(v, Ok(42)), - } - } -} - -// https://github.com/golang/go/blob/master/test/chan/select6.go -mod select6 { - use super::*; - - #[test] - fn main() { - let c1 = make::(0); - let c2 = make::(0); - let c3 = make::(0); - - go!(c1, c1.recv()); - go!(c1, c2, c3, { - select! 
{ - recv(c1.rx()) -> _ => panic!("dummy"), - recv(c2.rx()) -> _ => c3.send(true), - } - c1.recv(); - }); - go!(c2, c2.send(true)); - - c3.recv(); - c1.send(true); - c1.send(true); - } -} - -// https://github.com/golang/go/blob/master/test/chan/select7.go -mod select7 { - use super::*; - - fn recv1(c: Chan) { - c.recv().unwrap(); - } - - fn recv2(c: Chan) { - select! { - recv(c.rx()) -> _ => () - } - } - - fn recv3(c: Chan) { - let c2 = make::(1); - select! { - recv(c.rx()) -> _ => (), - recv(c2.rx()) -> _ => () - } - } - - fn send1(recv: fn(Chan)) { - let c = make::(1); - go!(c, recv(c)); - thread::yield_now(); - c.send(1); - } - - fn send2(recv: fn(Chan)) { - let c = make::(1); - go!(c, recv(c)); - thread::yield_now(); - select! { - send(c.tx(), 1) -> _ => () - } - } - - fn send3(recv: fn(Chan)) { - let c = make::(1); - go!(c, recv(c)); - thread::yield_now(); - let c2 = make::(1); - select! { - send(c.tx(), 1) -> _ => (), - send(c2.tx(), 1) -> _ => () - } - } - - #[test] - fn main() { - // https://github.com/rust-lang/miri/issues/1371 - if option_env!("MIRI_LEAK_CHECK").is_some() { - return; - } - - send1(recv1); - send2(recv1); - send3(recv1); - send1(recv2); - send2(recv2); - send3(recv2); - send1(recv3); - send2(recv3); - send3(recv3); - } -} - -// https://github.com/golang/go/blob/master/test/chan/sieve1.go -mod sieve1 { - use super::*; - - fn generate(ch: Chan) { - let mut i = 2; - loop { - ch.send(i); - i += 1; - } - } - - fn filter(in_ch: Chan, out_ch: Chan, prime: i32) { - for i in in_ch { - if i % prime != 0 { - out_ch.send(i); - } - } - } - - fn sieve(primes: Chan) { - let mut ch = make::(1); - go!(ch, generate(ch)); - loop { - let prime = ch.recv().unwrap(); - primes.send(prime); - - let ch1 = make::(1); - go!(ch, ch1, prime, filter(ch, ch1, prime)); - ch = ch1; - } - } - - #[test] - fn main() { - // https://github.com/rust-lang/miri/issues/1371 - if option_env!("MIRI_LEAK_CHECK").is_some() { - return; - } - - let primes = make::(1); - go!(primes, 
sieve(primes)); - - let a = [ - 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, - ]; - #[cfg(miri)] - let a = &a[..10]; - - for item in a.iter() { - let x = primes.recv().unwrap(); - if x != *item { - println!("{} != {}", x, item); - panic!("fail"); - } - } - } -} - -// https://github.com/golang/go/blob/master/test/chan/zerosize.go -mod zerosize { - use super::*; - - #[test] - fn zero_size_struct() { - struct ZeroSize; - let _ = make::(0); - } - - #[test] - fn zero_size_array() { - let _ = make::<[u8; 0]>(0); - } -} - -// https://github.com/golang/go/blob/master/src/runtime/chan_test.go -mod chan_test { - use super::*; - - #[test] - fn test_chan() { - #[cfg(miri)] - const N: i32 = 12; - #[cfg(not(miri))] - const N: i32 = 200; - - #[cfg(miri)] - const MESSAGES_COUNT: i32 = 20; - #[cfg(not(miri))] - const MESSAGES_COUNT: i32 = 100; - - for cap in 0..N { - { - // Ensure that receive from empty chan blocks. - let c = make::(cap as usize); - - let recv1 = Arc::new(Mutex::new(false)); - go!(c, recv1, { - c.recv(); - *recv1.lock().unwrap() = true; - }); - - let recv2 = Arc::new(Mutex::new(false)); - go!(c, recv2, { - c.recv(); - *recv2.lock().unwrap() = true; - }); - - thread::sleep(ms(1)); - - if *recv1.lock().unwrap() || *recv2.lock().unwrap() { - panic!(); - } - - // Ensure that non-blocking receive does not block. - select! { - recv(c.rx()) -> _ => panic!(), - default => {} - } - select! { - recv(c.rx()) -> _ => panic!(), - default => {} - } - - c.send(0); - c.send(0); - } - - { - // Ensure that send to full chan blocks. - let c = make::(cap as usize); - for i in 0..cap { - c.send(i); - } - - let sent = Arc::new(Mutex::new(0)); - go!(sent, c, { - c.send(0); - *sent.lock().unwrap() = 1; - }); - - thread::sleep(ms(1)); - - if *sent.lock().unwrap() != 0 { - panic!(); - } - - // Ensure that non-blocking send does not block. - select! 
{ - send(c.tx(), 0) -> _ => panic!(), - default => {} - } - c.recv(); - } - - { - // Ensure that we receive 0 from closed chan. - let c = make::(cap as usize); - for i in 0..cap { - c.send(i); - } - c.close_s(); - - for i in 0..cap { - let v = c.recv(); - if v != Some(i) { - panic!(); - } - } - - if c.recv().is_some() { - panic!(); - } - if c.try_recv().is_some() { - panic!(); - } - } - - { - // Ensure that close unblocks receive. - let c = make::(cap as usize); - let done = make::(0); - - go!(c, done, { - let v = c.try_recv(); - done.send(v.is_none()); - }); - - thread::sleep(ms(1)); - c.close_s(); - - if !done.recv().unwrap() { - panic!(); - } - } - - { - // Send many integers, - // ensure that we receive them non-corrupted in FIFO order. - let c = make::(cap as usize); - go!(c, { - for i in 0..MESSAGES_COUNT { - c.send(i); - } - }); - for i in 0..MESSAGES_COUNT { - if c.recv() != Some(i) { - panic!(); - } - } - - // Same, but using recv2. - go!(c, { - for i in 0..MESSAGES_COUNT { - c.send(i); - } - }); - for i in 0..MESSAGES_COUNT { - if c.recv() != Some(i) { - panic!(); - } - } - } - } - } - - #[test] - fn test_nonblock_recv_race() { - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 1000; - - for _ in 0..N { - let c = make::(1); - c.send(1); - - let t = go!(c, { - select! { - recv(c.rx()) -> _ => {} - default => panic!("chan is not ready"), - } - }); - - c.close_s(); - c.recv(); - t.join().unwrap(); - } - } - - #[test] - fn test_nonblock_select_race() { - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 1000; - - let done = make::(1); - for _ in 0..N { - let c1 = make::(1); - let c2 = make::(1); - c1.send(1); - - go!(c1, c2, done, { - select! { - recv(c1.rx()) -> _ => {} - recv(c2.rx()) -> _ => {} - default => { - done.send(false); - return; - } - } - done.send(true); - }); - - c2.send(1); - select! 
{ - recv(c1.rx()) -> _ => {} - default => {} - } - if !done.recv().unwrap() { - panic!("no chan is ready"); - } - } - } - - #[test] - fn test_nonblock_select_race2() { - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 1000; - - let done = make::(1); - for _ in 0..N { - let c1 = make::(1); - let c2 = make::(0); - c1.send(1); - - go!(c1, c2, done, { - select! { - recv(c1.rx()) -> _ => {} - recv(c2.rx()) -> _ => {} - default => { - done.send(false); - return; - } - } - done.send(true); - }); - - c2.close_s(); - select! { - recv(c1.rx()) -> _ => {} - default => {} - } - if !done.recv().unwrap() { - panic!("no chan is ready"); - } - } - } - - #[test] - fn test_self_select() { - // Ensure that send/recv on the same chan in select - // does not crash nor deadlock. - - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 1000; - - for &cap in &[0, 10] { - let wg = WaitGroup::new(); - wg.add(2); - let c = make::(cap); - - for p in 0..2 { - go!(wg, p, c, { - defer! { wg.done() } - for i in 0..N { - if p == 0 || i % 2 == 0 { - select! { - send(c.tx(), p) -> _ => {} - recv(c.rx()) -> v => { - if cap == 0 && v.ok() == Some(p) { - panic!("self receive"); - } - } - } - } else { - select! { - recv(c.rx()) -> v => { - if cap == 0 && v.ok() == Some(p) { - panic!("self receive"); - } - } - send(c.tx(), p) -> _ => {} - } - } - } - }); - } - wg.wait(); - } - } - - #[test] - fn test_select_stress() { - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 10000; - - let c = vec![ - make::(0), - make::(0), - make::(2), - make::(3), - ]; - - // There are 4 goroutines that send N values on each of the chans, - // + 4 goroutines that receive N values on each of the chans, - // + 1 goroutine that sends N values on each of the chans in a single select, - // + 1 goroutine that receives N values on each of the chans in a single select. 
- // All these sends, receives and selects interact chaotically at runtime, - // but we are careful that this whole construct does not deadlock. - let wg = WaitGroup::new(); - wg.add(10); - - for k in 0..4 { - go!(k, c, wg, { - for _ in 0..N { - c[k].send(0); - } - wg.done(); - }); - go!(k, c, wg, { - for _ in 0..N { - c[k].recv(); - } - wg.done(); - }); - } - - go!(c, wg, { - let mut n = [0; 4]; - let mut c1 = c.iter().map(|c| Some(c.rx().clone())).collect::>(); - - for _ in 0..4 * N { - let index = { - let mut sel = Select::new(); - let mut opers = [!0; 4]; - for &i in &[3, 2, 0, 1] { - if let Some(c) = &c1[i] { - opers[i] = sel.recv(c); - } - } - - let oper = sel.select(); - let mut index = !0; - for i in 0..4 { - if opers[i] == oper.index() { - index = i; - let _ = oper.recv(c1[i].as_ref().unwrap()); - break; - } - } - index - }; - - n[index] += 1; - if n[index] == N { - c1[index] = None; - } - } - wg.done(); - }); - - go!(c, wg, { - let mut n = [0; 4]; - let mut c1 = c.iter().map(|c| Some(c.tx().clone())).collect::>(); - - for _ in 0..4 * N { - let index = { - let mut sel = Select::new(); - let mut opers = [!0; 4]; - for &i in &[0, 1, 2, 3] { - if let Some(c) = &c1[i] { - opers[i] = sel.send(c); - } - } - - let oper = sel.select(); - let mut index = !0; - for i in 0..4 { - if opers[i] == oper.index() { - index = i; - let _ = oper.send(c1[i].as_ref().unwrap(), 0); - break; - } - } - index - }; - - n[index] += 1; - if n[index] == N { - c1[index] = None; - } - } - wg.done(); - }); - - wg.wait(); - } - - #[test] - fn test_select_fairness() { - #[cfg(miri)] - const TRIALS: usize = 100; - #[cfg(not(miri))] - const TRIALS: usize = 10000; - - let c1 = make::(TRIALS + 1); - let c2 = make::(TRIALS + 1); - - for _ in 0..TRIALS + 1 { - c1.send(1); - c2.send(2); - } - - let c3 = make::(0); - let c4 = make::(0); - let out = make::(0); - let done = make::(0); - let wg = WaitGroup::new(); - - wg.add(1); - go!(wg, c1, c2, c3, c4, out, done, { - defer! 
{ wg.done() }; - loop { - let b; - select! { - recv(c3.rx()) -> m => b = m.unwrap(), - recv(c4.rx()) -> m => b = m.unwrap(), - recv(c1.rx()) -> m => b = m.unwrap(), - recv(c2.rx()) -> m => b = m.unwrap(), - } - select! { - send(out.tx(), b) -> _ => {} - recv(done.rx()) -> _ => return, - } - } - }); - - let (mut cnt1, mut cnt2) = (0, 0); - for _ in 0..TRIALS { - match out.recv() { - Some(1) => cnt1 += 1, - Some(2) => cnt2 += 1, - b => panic!("unexpected value {:?} on channel", b), - } - } - - // If the select in the goroutine is fair, - // cnt1 and cnt2 should be about the same value. - // With 10,000 trials, the expected margin of error at - // a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)). - - let r = cnt1 as f64 / TRIALS as f64; - let e = (r - 0.5).abs(); - - if e > 4.4172 / (2.0 * (TRIALS as f64).sqrt()) { - panic!( - "unfair select: in {} trials, results were {}, {}", - TRIALS, cnt1, cnt2, - ); - } - - done.close_s(); - wg.wait(); - } - - #[test] - fn test_chan_send_interface() { - struct Mt; - - let c = make::>(1); - c.send(Box::new(Mt)); - - select! { - send(c.tx(), Box::new(Mt)) -> _ => {} - default => {} - } - - select! { - send(c.tx(), Box::new(Mt)) -> _ => {} - send(c.tx(), Box::new(Mt)) -> _ => {} - default => {} - } - } - - #[test] - fn test_pseudo_random_send() { - #[cfg(miri)] - const N: usize = 20; - #[cfg(not(miri))] - const N: usize = 100; - - for cap in 0..N { - let c = make::(cap); - let l = Arc::new(Mutex::new(vec![0i32; N])); - let done = make::(0); - - go!(c, done, l, { - let mut l = l.lock().unwrap(); - for i in 0..N { - thread::yield_now(); - l[i] = c.recv().unwrap(); - } - done.send(true); - }); - - for _ in 0..N { - select! 
{ - send(c.tx(), 1) -> _ => {} - send(c.tx(), 0) -> _ => {} - } - } - done.recv(); - - let mut n0 = 0; - let mut n1 = 0; - for &i in l.lock().unwrap().iter() { - n0 += (i + 1) % 2; - n1 += i; - } - - if n0 <= N as i32 / 10 || n1 <= N as i32 / 10 { - panic!( - "Want pseudorandom, got {} zeros and {} ones (chan cap {})", - n0, n1, cap, - ); - } - } - } - - #[test] - fn test_multi_consumer() { - const NWORK: usize = 23; - #[cfg(miri)] - const NITER: usize = 50; - #[cfg(not(miri))] - const NITER: usize = 271828; - - let pn = [2, 3, 7, 11, 13, 17, 19, 23, 27, 31]; - - let q = make::(NWORK * 3); - let r = make::(NWORK * 3); - - let wg = WaitGroup::new(); - for i in 0..NWORK { - wg.add(1); - let w = i; - go!(q, r, wg, pn, { - for v in &q { - if pn[w % pn.len()] == v { - thread::yield_now(); - } - r.send(v); - } - wg.done(); - }); - } - - let expect = Arc::new(Mutex::new(0)); - go!(q, r, expect, wg, pn, { - for i in 0..NITER { - let v = pn[i % pn.len()]; - *expect.lock().unwrap() += v; - q.send(v); - } - q.close_s(); - wg.wait(); - r.close_s(); - }); - - let mut n = 0; - let mut s = 0; - for v in &r { - n += 1; - s += v; - } - - if n != NITER || s != *expect.lock().unwrap() { - panic!(); - } - } - - #[test] - fn test_select_duplicate_channel() { - // This test makes sure we can queue a G on - // the same channel multiple times. - let c = make::(0); - let d = make::(0); - let e = make::(0); - - go!(c, d, e, { - select! 
{ - recv(c.rx()) -> _ => {} - recv(d.rx()) -> _ => {} - recv(e.rx()) -> _ => {} - } - e.send(9); - }); - thread::sleep(ms(1)); - - go!(c, c.recv()); - thread::sleep(ms(1)); - - d.send(7); - e.recv(); - c.send(8); - } -} - -// https://github.com/golang/go/blob/master/test/closedchan.go -mod closedchan { - // TODO -} - -// https://github.com/golang/go/blob/master/src/runtime/chanbarrier_test.go -mod chanbarrier_test { - // TODO -} - -// https://github.com/golang/go/blob/master/src/runtime/race/testdata/chan_test.go -mod race_chan_test { - // TODO -} - -// https://github.com/golang/go/blob/master/test/ken/chan.go -mod chan { - use super::*; - - const MESSAGES_PER_CHANEL: u32 = 76; - const MESSAGES_RANGE_LEN: u32 = 100; - const END: i32 = 10000; - - struct ChanWithVals { - chan: Chan, - /// Next value to send - sv: Arc, - /// Next value to receive - rv: Arc, - } - - struct Totals { - /// Total sent messages - tots: u32, - /// Total received messages - totr: u32, - } - - struct Context { - nproc: Arc>, - cval: Arc>, - tot: Arc>, - nc: ChanWithVals, - randx: Arc>, - } - - impl ChanWithVals { - fn with_capacity(capacity: usize) -> Self { - Self { - chan: make(capacity), - sv: Arc::new(AtomicI32::new(0)), - rv: Arc::new(AtomicI32::new(0)), - } - } - - fn closed() -> Self { - let ch = Self::with_capacity(0); - ch.chan.close_r(); - ch.chan.close_s(); - ch - } - - fn rv(&self) -> i32 { - self.rv.load(SeqCst) - } - - fn sv(&self) -> i32 { - self.sv.load(SeqCst) - } - - fn send(&mut self, tot: &Mutex) -> bool { - { - let mut tot = tot.lock().unwrap(); - tot.tots += 1 - } - let esv = expect(self.sv(), self.sv()); - self.sv.store(esv, SeqCst); - if self.sv() == END { - self.chan.close_s(); - return true; - } - false - } - - fn recv(&mut self, v: i32, tot: &Mutex) -> bool { - { - let mut tot = tot.lock().unwrap(); - tot.totr += 1 - } - let erv = expect(self.rv(), v); - self.rv.store(erv, SeqCst); - if self.rv() == END { - self.chan.close_r(); - return true; - } - false - } - } - - 
impl Clone for ChanWithVals { - fn clone(&self) -> Self { - Self { - chan: self.chan.clone(), - sv: self.sv.clone(), - rv: self.rv.clone(), - } - } - } - - impl Context { - fn nproc(&self) -> &Mutex { - self.nproc.as_ref() - } - - fn cval(&self) -> &Mutex { - self.cval.as_ref() - } - - fn tot(&self) -> &Mutex { - self.tot.as_ref() - } - - fn randx(&self) -> &Mutex { - self.randx.as_ref() - } - } - - impl Clone for Context { - fn clone(&self) -> Self { - Self { - nproc: self.nproc.clone(), - cval: self.cval.clone(), - tot: self.tot.clone(), - nc: self.nc.clone(), - randx: self.randx.clone(), - } - } - } - - fn nrand(n: i32, randx: &Mutex) -> i32 { - let mut randx = randx.lock().unwrap(); - *randx += 10007; - if *randx >= 1000000 { - *randx -= 1000000 - } - *randx % n - } - - fn change_nproc(adjust: i32, nproc: &Mutex) -> i32 { - let mut nproc = nproc.lock().unwrap(); - *nproc += adjust; - *nproc - } - - fn mkchan(c: usize, n: usize, cval: &Mutex) -> Vec { - let mut ca = Vec::::with_capacity(n); - let mut cval = cval.lock().unwrap(); - for _ in 0..n { - *cval += MESSAGES_RANGE_LEN as i32; - let chl = ChanWithVals::with_capacity(c); - chl.sv.store(*cval, SeqCst); - chl.rv.store(*cval, SeqCst); - ca.push(chl); - } - ca - } - - fn expect(v: i32, v0: i32) -> i32 { - if v == v0 { - return if v % MESSAGES_RANGE_LEN as i32 == MESSAGES_PER_CHANEL as i32 - 1 { - END - } else { - v + 1 - }; - } - panic!("got {}, expected {}", v, v0 + 1); - } - - fn send(mut c: ChanWithVals, ctx: Context) { - loop { - for _ in 0..=nrand(10, ctx.randx()) { - thread::yield_now(); - } - c.chan.tx().send(c.sv()).unwrap(); - if c.send(ctx.tot()) { - break; - } - } - change_nproc(-1, ctx.nproc()); - } - - fn recv(mut c: ChanWithVals, ctx: Context) { - loop { - for _ in (0..nrand(10, ctx.randx())).rev() { - thread::yield_now(); - } - let v = c.chan.rx().recv().unwrap(); - if c.recv(v, ctx.tot()) { - break; - } - } - change_nproc(-1, ctx.nproc()); - } - - #[allow(clippy::too_many_arguments)] - fn sel( 
- mut r0: ChanWithVals, - mut r1: ChanWithVals, - mut r2: ChanWithVals, - mut r3: ChanWithVals, - mut s0: ChanWithVals, - mut s1: ChanWithVals, - mut s2: ChanWithVals, - mut s3: ChanWithVals, - ctx: Context, - ) { - let mut a = 0; // local chans running - - if r0.chan.has_rx() { - a += 1; - } - if r1.chan.has_rx() { - a += 1; - } - if r2.chan.has_rx() { - a += 1; - } - if r3.chan.has_rx() { - a += 1; - } - if s0.chan.has_tx() { - a += 1; - } - if s1.chan.has_tx() { - a += 1; - } - if s2.chan.has_tx() { - a += 1; - } - if s3.chan.has_tx() { - a += 1; - } - - loop { - for _ in 0..=nrand(5, ctx.randx()) { - thread::yield_now(); - } - select! { - recv(r0.chan.rx()) -> v => if r0.recv(v.unwrap(), ctx.tot()) { a -= 1 }, - recv(r1.chan.rx()) -> v => if r1.recv(v.unwrap(), ctx.tot()) { a -= 1 }, - recv(r2.chan.rx()) -> v => if r2.recv(v.unwrap(), ctx.tot()) { a -= 1 }, - recv(r3.chan.rx()) -> v => if r3.recv(v.unwrap(), ctx.tot()) { a -= 1 }, - send(s0.chan.tx(), s0.sv()) -> _ => if s0.send(ctx.tot()) { a -= 1 }, - send(s1.chan.tx(), s1.sv()) -> _ => if s1.send(ctx.tot()) { a -= 1 }, - send(s2.chan.tx(), s2.sv()) -> _ => if s2.send(ctx.tot()) { a -= 1 }, - send(s3.chan.tx(), s3.sv()) -> _ => if s3.send(ctx.tot()) { a -= 1 }, - } - if a == 0 { - break; - } - } - change_nproc(-1, ctx.nproc()); - } - - fn get(vec: &[ChanWithVals], idx: usize) -> ChanWithVals { - vec.get(idx).unwrap().clone() - } - - /// Direct send to direct recv - fn test1(c: ChanWithVals, ctx: &mut Context) { - change_nproc(2, ctx.nproc()); - go!(c, ctx, send(c, ctx)); - go!(c, ctx, recv(c, ctx)); - } - - /// Direct send to select recv - fn test2(c: usize, ctx: &mut Context) { - let ca = mkchan(c, 4, ctx.cval()); - - change_nproc(4, ctx.nproc()); - go!(ca, ctx, send(get(&ca, 0), ctx)); - go!(ca, ctx, send(get(&ca, 1), ctx)); - go!(ca, ctx, send(get(&ca, 2), ctx)); - go!(ca, ctx, send(get(&ca, 3), ctx)); - - change_nproc(1, ctx.nproc()); - go!( - ca, - ctx, - sel( - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), 
- get(&ca, 3), - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - ctx, - ) - ); - } - - /// Select send to direct recv - fn test3(c: usize, ctx: &mut Context) { - let ca = mkchan(c, 4, ctx.cval()); - - change_nproc(4, ctx.nproc()); - go!(ca, ctx, recv(get(&ca, 0), ctx)); - go!(ca, ctx, recv(get(&ca, 1), ctx)); - go!(ca, ctx, recv(get(&ca, 2), ctx)); - go!(ca, ctx, recv(get(&ca, 3), ctx)); - - change_nproc(1, ctx.nproc()); - go!( - ca, - ctx, - sel( - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), - get(&ca, 3), - ctx, - ) - ); - } - - /// Select send to select recv, 4 channels - fn test4(c: usize, ctx: &mut Context) { - let ca = mkchan(c, 4, ctx.cval()); - - change_nproc(2, ctx.nproc()); - go!( - ca, - ctx, - sel( - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), - get(&ca, 3), - ctx, - ) - ); - go!( - ca, - ctx, - sel( - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), - get(&ca, 3), - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - ctx.nc.clone(), - ctx, - ) - ); - } - - /// Select send to select recv, 8 channels - fn test5(c: usize, ctx: &mut Context) { - let ca = mkchan(c, 8, ctx.cval()); - - change_nproc(2, ctx.nproc()); - go!( - ca, - ctx, - sel( - get(&ca, 4), - get(&ca, 5), - get(&ca, 6), - get(&ca, 7), - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), - get(&ca, 3), - ctx, - ) - ); - go!( - ca, - ctx, - sel( - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), - get(&ca, 3), - get(&ca, 4), - get(&ca, 5), - get(&ca, 6), - get(&ca, 7), - ctx, - ) - ); - } - - // Direct and select send to direct and select recv - fn test6(c: usize, ctx: &mut Context) { - let ca = mkchan(c, 12, ctx.cval()); - - change_nproc(4, ctx.nproc()); - go!(ca, ctx, send(get(&ca, 4), ctx)); - go!(ca, ctx, send(get(&ca, 5), ctx)); - go!(ca, ctx, send(get(&ca, 6), ctx)); - go!(ca, ctx, send(get(&ca, 7), ctx)); - - change_nproc(4, ctx.nproc()); - 
go!(ca, ctx, recv(get(&ca, 8), ctx)); - go!(ca, ctx, recv(get(&ca, 9), ctx)); - go!(ca, ctx, recv(get(&ca, 10), ctx)); - go!(ca, ctx, recv(get(&ca, 11), ctx)); - - change_nproc(2, ctx.nproc()); - go!( - ca, - ctx, - sel( - get(&ca, 4), - get(&ca, 5), - get(&ca, 6), - get(&ca, 7), - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), - get(&ca, 3), - ctx, - ) - ); - go!( - ca, - ctx, - sel( - get(&ca, 0), - get(&ca, 1), - get(&ca, 2), - get(&ca, 3), - get(&ca, 8), - get(&ca, 9), - get(&ca, 10), - get(&ca, 11), - ctx, - ) - ); - } - - fn wait(ctx: &mut Context) { - thread::yield_now(); - while change_nproc(0, ctx.nproc()) != 0 { - thread::yield_now(); - } - } - - fn tests(c: usize, ctx: &mut Context) { - let ca = mkchan(c, 4, ctx.cval()); - test1(get(&ca, 0), ctx); - test1(get(&ca, 1), ctx); - test1(get(&ca, 2), ctx); - test1(get(&ca, 3), ctx); - wait(ctx); - - test2(c, ctx); - wait(ctx); - - test3(c, ctx); - wait(ctx); - - test4(c, ctx); - wait(ctx); - - test5(c, ctx); - wait(ctx); - - test6(c, ctx); - wait(ctx); - } - - #[test] - #[cfg_attr(miri, ignore)] // Miri is too slow - fn main() { - let mut ctx = Context { - nproc: Arc::new(Mutex::new(0)), - cval: Arc::new(Mutex::new(0)), - tot: Arc::new(Mutex::new(Totals { tots: 0, totr: 0 })), - nc: ChanWithVals::closed(), - randx: Arc::new(Mutex::new(0)), - }; - - tests(0, &mut ctx); - tests(1, &mut ctx); - tests(10, &mut ctx); - tests(100, &mut ctx); - - #[rustfmt::skip] - let t = 4 * // buffer sizes - (4*4 + // tests 1,2,3,4 channels - 8 + // test 5 channels - 12) * // test 6 channels - MESSAGES_PER_CHANEL; // sends/recvs on a channel - - let tot = ctx.tot.lock().unwrap(); - if tot.tots != t || tot.totr != t { - panic!("tots={} totr={} sb={}", tot.tots, tot.totr, t); - } - } -} - -// https://github.com/golang/go/blob/master/test/ken/chan1.go -mod chan1 { - use super::*; - - // sent messages - #[cfg(miri)] - const N: usize = 20; - #[cfg(not(miri))] - const N: usize = 1000; - // receiving "goroutines" - const M: usize = 10; - // 
channel buffering - const W: usize = 2; - - fn r(c: Chan, m: usize, h: Arc>) { - loop { - select! { - recv(c.rx()) -> rr => { - let r = rr.unwrap(); - let mut data = h.lock().unwrap(); - if data[r] != 1 { - println!("r\nm={}\nr={}\nh={}\n", m, r, data[r]); - panic!("fail") - } - data[r] = 2; - } - } - } - } - - fn s(c: Chan, h: Arc>) { - for n in 0..N { - let r = n; - let mut data = h.lock().unwrap(); - if data[r] != 0 { - println!("s"); - panic!("fail"); - } - data[r] = 1; - // https://github.com/crossbeam-rs/crossbeam/pull/615#discussion_r550281094 - drop(data); - c.send(r); - } - } - - #[test] - fn main() { - // https://github.com/rust-lang/miri/issues/1371 - if option_env!("MIRI_LEAK_CHECK").is_some() { - return; - } - - let h = Arc::new(Mutex::new([0usize; N])); - let c = make::(W); - for m in 0..M { - go!(c, h, { - r(c, m, h); - }); - thread::yield_now(); - } - thread::yield_now(); - thread::yield_now(); - s(c, h); - } -} diff --git a/crossbeam-channel/tests/iter.rs b/crossbeam-channel/tests/iter.rs deleted file mode 100644 index 463f3b043..000000000 --- a/crossbeam-channel/tests/iter.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! Tests for iteration over receivers. 
- -use crossbeam_channel::unbounded; -use crossbeam_utils::thread::scope; - -#[test] -fn nested_recv_iter() { - let (s, r) = unbounded::(); - let (total_s, total_r) = unbounded::(); - - scope(|scope| { - scope.spawn(move |_| { - let mut acc = 0; - for x in r.iter() { - acc += x; - } - total_s.send(acc).unwrap(); - }); - - s.send(3).unwrap(); - s.send(1).unwrap(); - s.send(2).unwrap(); - drop(s); - assert_eq!(total_r.recv().unwrap(), 6); - }) - .unwrap(); -} - -#[test] -fn recv_iter_break() { - let (s, r) = unbounded::(); - let (count_s, count_r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - for x in r.iter() { - if count >= 3 { - break; - } else { - count += x; - } - } - count_s.send(count).unwrap(); - }); - - s.send(2).unwrap(); - s.send(2).unwrap(); - s.send(2).unwrap(); - let _ = s.send(2); - drop(s); - assert_eq!(count_r.recv().unwrap(), 4); - }) - .unwrap(); -} - -#[test] -fn recv_try_iter() { - let (request_s, request_r) = unbounded(); - let (response_s, response_r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - loop { - for x in response_r.try_iter() { - count += x; - if count == 6 { - return; - } - } - request_s.send(()).unwrap(); - } - }); - - for _ in request_r.iter() { - if response_s.send(2).is_err() { - break; - } - } - }) - .unwrap(); -} - -#[test] -fn recv_into_iter_owned() { - let mut iter = { - let (s, r) = unbounded::(); - s.send(1).unwrap(); - s.send(2).unwrap(); - r.into_iter() - }; - - assert_eq!(iter.next().unwrap(), 1); - assert_eq!(iter.next().unwrap(), 2); - assert!(iter.next().is_none()); -} - -#[test] -fn recv_into_iter_borrowed() { - let (s, r) = unbounded::(); - s.send(1).unwrap(); - s.send(2).unwrap(); - drop(s); - - let mut iter = (&r).into_iter(); - assert_eq!(iter.next().unwrap(), 1); - assert_eq!(iter.next().unwrap(), 2); - assert!(iter.next().is_none()); -} diff --git a/crossbeam-channel/tests/list.rs b/crossbeam-channel/tests/list.rs deleted file mode 100644 
index beabac8f2..000000000 --- a/crossbeam-channel/tests/list.rs +++ /dev/null @@ -1,597 +0,0 @@ -//! Tests for the list channel flavor. - -use std::any::Any; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{select, unbounded, Receiver}; -use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; -use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; -use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - let (s, r) = unbounded(); - s.try_send(7).unwrap(); - assert_eq!(r.try_recv(), Ok(7)); - - s.send(8).unwrap(); - assert_eq!(r.recv(), Ok(8)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); -} - -#[test] -fn capacity() { - let (s, r) = unbounded::<()>(); - assert_eq!(s.capacity(), None); - assert_eq!(r.capacity(), None); -} - -#[test] -fn len_empty_full() { - let (s, r) = unbounded(); - - assert_eq!(s.len(), 0); - assert!(s.is_empty()); - assert!(!s.is_full()); - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(!r.is_full()); - - s.send(()).unwrap(); - - assert_eq!(s.len(), 1); - assert!(!s.is_empty()); - assert!(!s.is_full()); - assert_eq!(r.len(), 1); - assert!(!r.is_empty()); - assert!(!r.is_full()); - - r.recv().unwrap(); - - assert_eq!(s.len(), 0); - assert!(s.is_empty()); - assert!(!s.is_full()); - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(!r.is_full()); -} - -#[test] -#[cfg_attr(miri, ignore)] // this test makes timing assumptions, but Miri is so slow it violates them -fn try_recv() { - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(1500)); - assert_eq!(r.try_recv(), Ok(7)); - thread::sleep(ms(500)); - assert_eq!(r.try_recv(), 
Err(TryRecvError::Disconnected)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Ok(7)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(9)); - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - s.send(8).unwrap(); - s.send(9).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = unbounded::(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); - assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); - assert_eq!( - r.recv_timeout(ms(1000)), - Err(RecvTimeoutError::Disconnected) - ); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - let (s, r) = unbounded(); - for i in 0..COUNT { - assert_eq!(s.try_send(i), Ok(())); - } - - drop(r); - assert_eq!(s.try_send(777), Err(TrySendError::Disconnected(777))); -} - -#[test] -fn send() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - let (s, r) = unbounded(); - for i in 0..COUNT { - assert_eq!(s.send(i), Ok(())); - } - - drop(r); - assert_eq!(s.send(777), Err(SendError(777))); -} - -#[test] -fn send_timeout() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - let (s, r) = unbounded(); - for i in 0..COUNT { - assert_eq!(s.send_timeout(i, ms(i as u64)), Ok(())); - } - - drop(r); - assert_eq!( - s.send_timeout(777, ms(0)), - Err(SendTimeoutError::Disconnected(777)) - ); -} - -#[test] -fn send_after_disconnect() { - let (s, r) = unbounded(); - - s.send(1).unwrap(); - 
s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(r); - - assert_eq!(s.send(4), Err(SendError(4))); - assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5))); - assert_eq!( - s.send_timeout(6, ms(0)), - Err(SendTimeoutError::Disconnected(6)) - ); -} - -#[test] -fn recv_after_disconnect() { - let (s, r) = unbounded(); - - s.send(1).unwrap(); - s.send(2).unwrap(); - s.send(3).unwrap(); - - drop(s); - - assert_eq!(r.recv(), Ok(1)); - assert_eq!(r.recv(), Ok(2)); - assert_eq!(r.recv(), Ok(3)); - assert_eq!(r.recv(), Err(RecvError)); -} - -#[test] -fn len() { - let (s, r) = unbounded(); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - for i in 0..50 { - s.send(i).unwrap(); - assert_eq!(s.len(), i + 1); - } - - for i in 0..50 { - r.recv().unwrap(); - assert_eq!(r.len(), 50 - i - 1); - } - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - } - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - }) - .unwrap(); -} - -#[test] -fn mpmc() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = unbounded::(); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = r.recv().unwrap(); - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - 
s.send(i).unwrap(); - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn stress_oneshot() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - for _ in 0..COUNT { - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(|_| r.recv().unwrap()); - scope.spawn(|_| s.send(0).unwrap()); - }) - .unwrap(); - } -} - -#[test] -fn stress_iter() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let (request_s, request_r) = unbounded(); - let (response_s, response_r) = unbounded(); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - loop { - for x in response_r.try_iter() { - count += x; - if count == COUNT { - return; - } - } - request_s.send(()).unwrap(); - } - }); - - for _ in request_r.iter() { - if response_s.send(1).is_err() { - break; - } - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 100; - - let (s, r) = unbounded(); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - s.send(i).unwrap(); - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(x) = r.recv_timeout(ms(10)) { - assert_eq!(x, i); - break; - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn drops() { - #[cfg(miri)] - const RUNS: usize = 20; - #[cfg(not(miri))] - const RUNS: usize = 100; - #[cfg(miri)] - const STEPS: usize = 100; - #[cfg(not(miri))] - const STEPS: usize = 10_000; - - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - let mut rng = thread_rng(); - - for _ in 0..RUNS { - let steps = rng.gen_range(0..STEPS); - let additional = 
rng.gen_range(0..STEPS / 10); - - DROPS.store(0, Ordering::SeqCst); - let (s, r) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - r.recv().unwrap(); - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - s.send(DropCounter).unwrap(); - } - }); - }) - .unwrap(); - - for _ in 0..additional { - s.try_send(DropCounter).unwrap(); - } - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(s); - drop(r); - assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); - } -} - -#[test] -fn linearizable() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = unbounded(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - s.send(0).unwrap(); - r.try_recv().unwrap(); - } - }); - } - }) - .unwrap(); -} - -#[test] -fn fairness() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness_duplicates() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s, r) = unbounded(); - - for _ in 0..COUNT { - s.send(()).unwrap(); - } - - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[test] -fn recv_in_send() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - select! 
{ - send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {} - } -} - -#[test] -fn channel_through_channel() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - type T = Box; - - let (s, r) = unbounded::(); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = unbounded(); - let new_r: T = Box::new(Some(new_r)); - - s.send(new_r).unwrap(); - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = r - .recv() - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - }); - }) - .unwrap(); -} - -// If `Block` is created on the stack, the array of slots will multiply this `BigStruct` and -// probably overflow the thread stack. It's now directly created on the heap to avoid this. -#[test] -fn stack_overflow() { - const N: usize = 32_768; - struct BigStruct { - _data: [u8; N], - } - - let (sender, receiver) = unbounded::(); - sender.send(BigStruct { _data: [0u8; N] }).unwrap(); - - for _data in receiver.try_iter() {} -} diff --git a/crossbeam-channel/tests/mpsc.rs b/crossbeam-channel/tests/mpsc.rs deleted file mode 100644 index 307e2f400..000000000 --- a/crossbeam-channel/tests/mpsc.rs +++ /dev/null @@ -1,2126 +0,0 @@ -//! Tests copied from `std::sync::mpsc`. -//! -//! This is a copy of tests for the `std::sync::mpsc` channels from the standard library, but -//! modified to work with `crossbeam-channel` instead. -//! -//! Minor tweaks were needed to make the tests compile: -//! -//! - Replace `box` syntax with `Box::new`. -//! - Replace all uses of `Select` with `select!`. -//! - Change the imports. -//! - Join all spawned threads. -//! - Removed assertion from oneshot_multi_thread_send_close_stress tests. -//! -//! Source: -//! - https://github.com/rust-lang/rust/tree/master/src/libstd/sync/mpsc -//! -//! Copyright & License: -//! - Copyright 2013-2014 The Rust Project Developers -//! 
- Apache License, Version 2.0 or MIT license, at your option -//! - https://github.com/rust-lang/rust/blob/master/COPYRIGHT -//! - https://www.rust-lang.org/en-US/legal.html - -#![allow(clippy::match_single_binding, clippy::redundant_clone)] - -use std::sync::mpsc::{RecvError, RecvTimeoutError, TryRecvError}; -use std::sync::mpsc::{SendError, TrySendError}; -use std::thread::JoinHandle; -use std::time::Duration; - -use crossbeam_channel as cc; - -struct Sender { - inner: cc::Sender, -} - -impl Sender { - fn send(&self, t: T) -> Result<(), SendError> { - self.inner.send(t).map_err(|cc::SendError(m)| SendError(m)) - } -} - -impl Clone for Sender { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -struct SyncSender { - inner: cc::Sender, -} - -impl SyncSender { - fn send(&self, t: T) -> Result<(), SendError> { - self.inner.send(t).map_err(|cc::SendError(m)| SendError(m)) - } - - fn try_send(&self, t: T) -> Result<(), TrySendError> { - self.inner.try_send(t).map_err(|err| match err { - cc::TrySendError::Full(m) => TrySendError::Full(m), - cc::TrySendError::Disconnected(m) => TrySendError::Disconnected(m), - }) - } -} - -impl Clone for SyncSender { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -struct Receiver { - inner: cc::Receiver, -} - -impl Receiver { - fn try_recv(&self) -> Result { - self.inner.try_recv().map_err(|err| match err { - cc::TryRecvError::Empty => TryRecvError::Empty, - cc::TryRecvError::Disconnected => TryRecvError::Disconnected, - }) - } - - fn recv(&self) -> Result { - self.inner.recv().map_err(|_| RecvError) - } - - fn recv_timeout(&self, timeout: Duration) -> Result { - self.inner.recv_timeout(timeout).map_err(|err| match err { - cc::RecvTimeoutError::Timeout => RecvTimeoutError::Timeout, - cc::RecvTimeoutError::Disconnected => RecvTimeoutError::Disconnected, - }) - } - - fn iter(&self) -> Iter<'_, T> { - Iter { inner: self } - } - - fn try_iter(&self) -> TryIter<'_, T> { - TryIter { 
inner: self } - } -} - -impl<'a, T> IntoIterator for &'a Receiver { - type Item = T; - type IntoIter = Iter<'a, T>; - - fn into_iter(self) -> Iter<'a, T> { - self.iter() - } -} - -impl IntoIterator for Receiver { - type Item = T; - type IntoIter = IntoIter; - - fn into_iter(self) -> IntoIter { - IntoIter { inner: self } - } -} - -struct TryIter<'a, T> { - inner: &'a Receiver, -} - -impl Iterator for TryIter<'_, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.try_recv().ok() - } -} - -struct Iter<'a, T> { - inner: &'a Receiver, -} - -impl Iterator for Iter<'_, T> { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.recv().ok() - } -} - -struct IntoIter { - inner: Receiver, -} - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - self.inner.recv().ok() - } -} - -fn channel() -> (Sender, Receiver) { - let (s, r) = cc::unbounded(); - let s = Sender { inner: s }; - let r = Receiver { inner: r }; - (s, r) -} - -fn sync_channel(bound: usize) -> (SyncSender, Receiver) { - let (s, r) = cc::bounded(bound); - let s = SyncSender { inner: s }; - let r = Receiver { inner: r }; - (s, r) -} - -macro_rules! select { - ( - $($name:pat = $rx:ident.$meth:ident() => $code:expr),+ - ) => ({ - const _IS_BIASED: bool = false; - - cc::crossbeam_channel_internal! { - $( - $meth(($rx).inner) -> res => { - let $name = res.map_err(|_| ::std::sync::mpsc::RecvError); - $code - } - )+ - } - }) -} - -// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/mod.rs -mod channel_tests { - use super::*; - - use std::env; - use std::thread; - use std::time::Instant; - - fn stress_factor() -> usize { - match env::var("RUST_TEST_STRESS") { - Ok(val) => val.parse().unwrap(), - Err(..) 
=> 1, - } - } - - #[test] - fn smoke() { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn drop_full() { - let (tx, _rx) = channel::>(); - tx.send(Box::new(1)).unwrap(); - } - - #[test] - fn drop_full_shared() { - let (tx, _rx) = channel::>(); - drop(tx.clone()); - drop(tx.clone()); - tx.send(Box::new(1)).unwrap(); - } - - #[test] - fn smoke_shared() { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - let tx = tx.clone(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn smoke_threads() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - tx.send(1).unwrap(); - }); - assert_eq!(rx.recv().unwrap(), 1); - t.join().unwrap(); - } - - #[test] - fn smoke_port_gone() { - let (tx, rx) = channel::(); - drop(rx); - assert!(tx.send(1).is_err()); - } - - #[test] - fn smoke_shared_port_gone() { - let (tx, rx) = channel::(); - drop(rx); - assert!(tx.send(1).is_err()) - } - - #[test] - fn smoke_shared_port_gone2() { - let (tx, rx) = channel::(); - drop(rx); - let tx2 = tx.clone(); - drop(tx); - assert!(tx2.send(1).is_err()); - } - - #[test] - fn port_gone_concurrent() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn port_gone_concurrent_shared() { - let (tx, rx) = channel::(); - let tx2 = tx.clone(); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() && tx2.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn smoke_chan_gone() { - let (tx, rx) = channel::(); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn smoke_chan_gone_shared() { - let (tx, rx) = channel::<()>(); - let tx2 = tx.clone(); - drop(tx); - drop(tx2); - assert!(rx.recv().is_err()); - } - - #[test] - fn chan_gone_concurrent() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - 
tx.send(1).unwrap(); - tx.send(1).unwrap(); - }); - while rx.recv().is_ok() {} - t.join().unwrap(); - } - - #[test] - fn stress() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10000; - - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - for _ in 0..COUNT { - tx.send(1).unwrap(); - } - }); - for _ in 0..COUNT { - assert_eq!(rx.recv().unwrap(), 1); - } - t.join().unwrap(); - } - - #[test] - fn stress_shared() { - let amt: u32 = if cfg!(miri) { 100 } else { 10_000 }; - let nthreads: u32 = if cfg!(miri) { 4 } else { 8 }; - let (tx, rx) = channel::(); - - let t = thread::spawn(move || { - for _ in 0..amt * nthreads { - assert_eq!(rx.recv().unwrap(), 1); - } - assert!(rx.try_recv().is_err()); - }); - - let mut ts = Vec::with_capacity(nthreads as usize); - for _ in 0..nthreads { - let tx = tx.clone(); - let t = thread::spawn(move || { - for _ in 0..amt { - tx.send(1).unwrap(); - } - }); - ts.push(t); - } - drop(tx); - t.join().unwrap(); - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn send_from_outside_runtime() { - let (tx1, rx1) = channel::<()>(); - let (tx2, rx2) = channel::(); - let t1 = thread::spawn(move || { - tx1.send(()).unwrap(); - for _ in 0..40 { - assert_eq!(rx2.recv().unwrap(), 1); - } - }); - rx1.recv().unwrap(); - let t2 = thread::spawn(move || { - for _ in 0..40 { - tx2.send(1).unwrap(); - } - }); - t1.join().unwrap(); - t2.join().unwrap(); - } - - #[test] - fn recv_from_outside_runtime() { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - for _ in 0..40 { - assert_eq!(rx.recv().unwrap(), 1); - } - }); - for _ in 0..40 { - tx.send(1).unwrap(); - } - t.join().unwrap(); - } - - #[test] - fn no_runtime() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - let t1 = thread::spawn(move || { - assert_eq!(rx1.recv().unwrap(), 1); - tx2.send(2).unwrap(); - }); - let t2 = thread::spawn(move || { - tx1.send(1).unwrap(); - assert_eq!(rx2.recv().unwrap(), 2); - }); - 
t1.join().unwrap(); - t2.join().unwrap(); - } - - #[test] - fn oneshot_single_thread_close_port_first() { - // Simple test of closing without sending - let (_tx, rx) = channel::(); - drop(rx); - } - - #[test] - fn oneshot_single_thread_close_chan_first() { - // Simple test of closing without sending - let (tx, _rx) = channel::(); - drop(tx); - } - - #[test] - fn oneshot_single_thread_send_port_close() { - // Testing that the sender cleans up the payload if receiver is closed - let (tx, rx) = channel::>(); - drop(rx); - assert!(tx.send(Box::new(0)).is_err()); - } - - #[test] - fn oneshot_single_thread_recv_chan_close() { - let (tx, rx) = channel::(); - drop(tx); - assert_eq!(rx.recv(), Err(RecvError)); - } - - #[test] - fn oneshot_single_thread_send_then_recv() { - let (tx, rx) = channel::>(); - tx.send(Box::new(10)).unwrap(); - assert!(*rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_open() { - let (tx, rx) = channel::(); - assert!(tx.send(10).is_ok()); - assert!(rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_closed() { - let (tx, rx) = channel::(); - drop(rx); - assert!(tx.send(10).is_err()); - } - - #[test] - fn oneshot_single_thread_try_recv_open() { - let (tx, rx) = channel::(); - tx.send(10).unwrap(); - assert!(rx.recv() == Ok(10)); - } - - #[test] - fn oneshot_single_thread_try_recv_closed() { - let (tx, rx) = channel::(); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn oneshot_single_thread_peek_data() { - let (tx, rx) = channel::(); - assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); - tx.send(10).unwrap(); - assert_eq!(rx.try_recv(), Ok(10)); - } - - #[test] - fn oneshot_single_thread_peek_close() { - let (tx, rx) = channel::(); - drop(tx); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - } - - #[test] - fn oneshot_single_thread_peek_open() { - let (_tx, rx) = channel::(); - assert_eq!(rx.try_recv(), 
Err(TryRecvError::Empty)); - } - - #[test] - fn oneshot_multi_task_recv_then_send() { - let (tx, rx) = channel::>(); - let t = thread::spawn(move || { - assert!(*rx.recv().unwrap() == 10); - }); - - tx.send(Box::new(10)).unwrap(); - t.join().unwrap(); - } - - #[test] - fn oneshot_multi_task_recv_then_close() { - let (tx, rx) = channel::>(); - let t = thread::spawn(move || { - drop(tx); - }); - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - t.join().unwrap(); - } - - #[test] - fn oneshot_multi_thread_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - drop(tx); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - thread::spawn(move || { - let _ = tx.send(1); - }) - .join() - .unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_recv_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::(); - let t = thread::spawn(move || { - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - }); - ts.push(t); - let t2 = thread::spawn(move || { - let t = thread::spawn(move || { - drop(tx); - }); - t.join().unwrap(); - }); - ts.push(t2); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel::>(); 
- let t = thread::spawn(move || { - tx.send(Box::new(10)).unwrap(); - }); - ts.push(t); - assert!(*rx.recv().unwrap() == 10); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn stream_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = channel(); - - if let Some(t) = send(tx, 0) { - ts.push(t); - } - if let Some(t2) = recv(rx, 0) { - ts.push(t2); - } - - fn send(tx: Sender>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - tx.send(Box::new(i)).unwrap(); - send(tx, i + 1); - })) - } - - fn recv(rx: Receiver>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - assert!(*rx.recv().unwrap() == i); - recv(rx, i + 1); - })) - } - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_single_thread_recv_timeout() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); - assert_eq!( - rx.recv_timeout(Duration::from_millis(1)), - Err(RecvTimeoutError::Timeout) - ); - tx.send(()).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); - } - - #[test] - fn stress_recv_timeout_two_threads() { - let (tx, rx) = channel(); - let stress = stress_factor() + 100; - let timeout = Duration::from_millis(100); - - let t = thread::spawn(move || { - for i in 0..stress { - if i % 2 == 0 { - thread::sleep(timeout * 2); - } - tx.send(1usize).unwrap(); - } - }); - - let mut recv_count = 0; - loop { - match rx.recv_timeout(timeout) { - Ok(n) => { - assert_eq!(n, 1usize); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - assert_eq!(recv_count, stress); - t.join().unwrap() - } - - #[test] - fn recv_timeout_upgrade() { - let (tx, rx) = channel::<()>(); - let timeout = Duration::from_millis(1); - let _tx_clone = tx.clone(); 
- - let start = Instant::now(); - assert_eq!(rx.recv_timeout(timeout), Err(RecvTimeoutError::Timeout)); - assert!(Instant::now() >= start + timeout); - } - - #[test] - fn stress_recv_timeout_shared() { - let (tx, rx) = channel(); - let stress = stress_factor() + 100; - - let mut ts = Vec::with_capacity(stress); - for i in 0..stress { - let tx = tx.clone(); - let t = thread::spawn(move || { - thread::sleep(Duration::from_millis(i as u64 * 10)); - tx.send(1usize).unwrap(); - }); - ts.push(t); - } - - drop(tx); - - let mut recv_count = 0; - loop { - match rx.recv_timeout(Duration::from_millis(10)) { - Ok(n) => { - assert_eq!(n, 1usize); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - assert_eq!(recv_count, stress); - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn recv_a_lot() { - #[cfg(miri)] - const N: usize = 50; - #[cfg(not(miri))] - const N: usize = 10000; - - // Regression test that we don't run out of stack in scheduler context - let (tx, rx) = channel(); - for _ in 0..N { - tx.send(()).unwrap(); - } - for _ in 0..N { - rx.recv().unwrap(); - } - } - - #[test] - fn shared_recv_timeout() { - let (tx, rx) = channel(); - let total = 5; - let mut ts = Vec::with_capacity(total); - for _ in 0..total { - let tx = tx.clone(); - let t = thread::spawn(move || { - tx.send(()).unwrap(); - }); - ts.push(t); - } - - for _ in 0..total { - rx.recv().unwrap(); - } - - assert_eq!( - rx.recv_timeout(Duration::from_millis(1)), - Err(RecvTimeoutError::Timeout) - ); - tx.send(()).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(())); - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn shared_chan_stress() { - let (tx, rx) = channel(); - let total = stress_factor() + 100; - let mut ts = Vec::with_capacity(total); - for _ in 0..total { - let tx = tx.clone(); - let t = thread::spawn(move || { - tx.send(()).unwrap(); - }); - ts.push(t); - } - - for _ in 0..total { - 
rx.recv().unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn test_nested_recv_iter() { - let (tx, rx) = channel::(); - let (total_tx, total_rx) = channel::(); - - let t = thread::spawn(move || { - let mut acc = 0; - for x in rx.iter() { - acc += x; - } - total_tx.send(acc).unwrap(); - }); - - tx.send(3).unwrap(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - drop(tx); - assert_eq!(total_rx.recv().unwrap(), 6); - t.join().unwrap(); - } - - #[test] - fn test_recv_iter_break() { - let (tx, rx) = channel::(); - let (count_tx, count_rx) = channel(); - - let t = thread::spawn(move || { - let mut count = 0; - for x in rx.iter() { - if count >= 3 { - break; - } else { - count += x; - } - } - count_tx.send(count).unwrap(); - }); - - tx.send(2).unwrap(); - tx.send(2).unwrap(); - tx.send(2).unwrap(); - let _ = tx.send(2); - drop(tx); - assert_eq!(count_rx.recv().unwrap(), 4); - t.join().unwrap(); - } - - #[test] - fn test_recv_try_iter() { - let (request_tx, request_rx) = channel(); - let (response_tx, response_rx) = channel(); - - // Request `x`s until we have `6`. 
- let t = thread::spawn(move || { - let mut count = 0; - loop { - for x in response_rx.try_iter() { - count += x; - if count == 6 { - return count; - } - } - request_tx.send(()).unwrap(); - } - }); - - for _ in request_rx.iter() { - if response_tx.send(2).is_err() { - break; - } - } - - assert_eq!(t.join().unwrap(), 6); - } - - #[test] - fn test_recv_into_iter_owned() { - let mut iter = { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - - rx.into_iter() - }; - assert_eq!(iter.next().unwrap(), 1); - assert_eq!(iter.next().unwrap(), 2); - assert!(iter.next().is_none()); - } - - #[test] - fn test_recv_into_iter_borrowed() { - let (tx, rx) = channel::(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - drop(tx); - let mut iter = (&rx).into_iter(); - assert_eq!(iter.next().unwrap(), 1); - assert_eq!(iter.next().unwrap(), 2); - assert!(iter.next().is_none()); - } - - #[test] - fn try_recv_states() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::<()>(); - let (tx3, rx3) = channel::<()>(); - let t = thread::spawn(move || { - rx2.recv().unwrap(); - tx1.send(1).unwrap(); - tx3.send(()).unwrap(); - rx2.recv().unwrap(); - drop(tx1); - tx3.send(()).unwrap(); - }); - - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Ok(1)); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected)); - t.join().unwrap(); - } - - // This bug used to end up in a livelock inside of the Receiver destructor - // because the internal state of the Shared packet was corrupted - #[test] - fn destroy_upgraded_shared_port_when_sender_still_active() { - let (tx, rx) = channel(); - let (tx2, rx2) = channel(); - let t = thread::spawn(move || { - rx.recv().unwrap(); // wait on a oneshot - drop(rx); // destroy a shared - tx2.send(()).unwrap(); - }); - // make sure the other thread has 
gone to sleep - for _ in 0..5000 { - thread::yield_now(); - } - - // upgrade to a shared chan and send a message - let tx2 = tx.clone(); - drop(tx); - tx2.send(()).unwrap(); - - // wait for the child thread to exit before we exit - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn issue_32114() { - let (tx, _) = channel(); - let _ = tx.send(123); - assert_eq!(tx.send(123), Err(SendError(123))); - } -} - -// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/mod.rs -mod sync_channel_tests { - use super::*; - - use std::env; - use std::thread; - - fn stress_factor() -> usize { - match env::var("RUST_TEST_STRESS") { - Ok(val) => val.parse().unwrap(), - Err(..) => 1, - } - } - - #[test] - fn smoke() { - let (tx, rx) = sync_channel::(1); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn drop_full() { - let (tx, _rx) = sync_channel::>(1); - tx.send(Box::new(1)).unwrap(); - } - - #[test] - fn smoke_shared() { - let (tx, rx) = sync_channel::(1); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - let tx = tx.clone(); - tx.send(1).unwrap(); - assert_eq!(rx.recv().unwrap(), 1); - } - - #[test] - fn recv_timeout() { - let (tx, rx) = sync_channel::(1); - assert_eq!( - rx.recv_timeout(Duration::from_millis(1)), - Err(RecvTimeoutError::Timeout) - ); - tx.send(1).unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_millis(1)), Ok(1)); - } - - #[test] - fn smoke_threads() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - tx.send(1).unwrap(); - }); - assert_eq!(rx.recv().unwrap(), 1); - t.join().unwrap(); - } - - #[test] - fn smoke_port_gone() { - let (tx, rx) = sync_channel::(0); - drop(rx); - assert!(tx.send(1).is_err()); - } - - #[test] - fn smoke_shared_port_gone2() { - let (tx, rx) = sync_channel::(0); - drop(rx); - let tx2 = tx.clone(); - drop(tx); - assert!(tx2.send(1).is_err()); - } - - #[test] - fn port_gone_concurrent() { - let (tx, rx) = sync_channel::(0); - let t = 
thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn port_gone_concurrent_shared() { - let (tx, rx) = sync_channel::(0); - let tx2 = tx.clone(); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - while tx.send(1).is_ok() && tx2.send(1).is_ok() {} - t.join().unwrap(); - } - - #[test] - fn smoke_chan_gone() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn smoke_chan_gone_shared() { - let (tx, rx) = sync_channel::<()>(0); - let tx2 = tx.clone(); - drop(tx); - drop(tx2); - assert!(rx.recv().is_err()); - } - - #[test] - fn chan_gone_concurrent() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - tx.send(1).unwrap(); - tx.send(1).unwrap(); - }); - while rx.recv().is_ok() {} - t.join().unwrap(); - } - - #[test] - fn stress() { - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 10000; - - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - for _ in 0..N { - tx.send(1).unwrap(); - } - }); - for _ in 0..N { - assert_eq!(rx.recv().unwrap(), 1); - } - t.join().unwrap(); - } - - #[test] - fn stress_recv_timeout_two_threads() { - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 10000; - - let (tx, rx) = sync_channel::(0); - - let t = thread::spawn(move || { - for _ in 0..N { - tx.send(1).unwrap(); - } - }); - - let mut recv_count = 0; - loop { - match rx.recv_timeout(Duration::from_millis(1)) { - Ok(v) => { - assert_eq!(v, 1); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - assert_eq!(recv_count, N); - t.join().unwrap(); - } - - #[test] - fn stress_recv_timeout_shared() { - #[cfg(miri)] - const AMT: u32 = 100; - #[cfg(not(miri))] - const AMT: u32 = 1000; - const NTHREADS: u32 = 8; - let (tx, rx) = sync_channel::(0); - let (dtx, drx) = sync_channel::<()>(0); - - let t = 
thread::spawn(move || { - let mut recv_count = 0; - loop { - match rx.recv_timeout(Duration::from_millis(10)) { - Ok(v) => { - assert_eq!(v, 1); - recv_count += 1; - } - Err(RecvTimeoutError::Timeout) => continue, - Err(RecvTimeoutError::Disconnected) => break, - } - } - - assert_eq!(recv_count, AMT * NTHREADS); - assert!(rx.try_recv().is_err()); - - dtx.send(()).unwrap(); - }); - - let mut ts = Vec::with_capacity(NTHREADS as usize); - for _ in 0..NTHREADS { - let tx = tx.clone(); - let t = thread::spawn(move || { - for _ in 0..AMT { - tx.send(1).unwrap(); - } - }); - ts.push(t); - } - - drop(tx); - - drx.recv().unwrap(); - for t in ts { - t.join().unwrap(); - } - t.join().unwrap(); - } - - #[test] - fn stress_shared() { - #[cfg(miri)] - const AMT: u32 = 100; - #[cfg(not(miri))] - const AMT: u32 = 1000; - const NTHREADS: u32 = 8; - let (tx, rx) = sync_channel::(0); - let (dtx, drx) = sync_channel::<()>(0); - - let t = thread::spawn(move || { - for _ in 0..AMT * NTHREADS { - assert_eq!(rx.recv().unwrap(), 1); - } - assert!(rx.try_recv().is_err()); - dtx.send(()).unwrap(); - }); - - let mut ts = Vec::with_capacity(NTHREADS as usize); - for _ in 0..NTHREADS { - let tx = tx.clone(); - let t = thread::spawn(move || { - for _ in 0..AMT { - tx.send(1).unwrap(); - } - }); - ts.push(t); - } - drop(tx); - drx.recv().unwrap(); - for t in ts { - t.join().unwrap(); - } - t.join().unwrap(); - } - - #[test] - fn oneshot_single_thread_close_port_first() { - // Simple test of closing without sending - let (_tx, rx) = sync_channel::(0); - drop(rx); - } - - #[test] - fn oneshot_single_thread_close_chan_first() { - // Simple test of closing without sending - let (tx, _rx) = sync_channel::(0); - drop(tx); - } - - #[test] - fn oneshot_single_thread_send_port_close() { - // Testing that the sender cleans up the payload if receiver is closed - let (tx, rx) = sync_channel::>(0); - drop(rx); - assert!(tx.send(Box::new(0)).is_err()); - } - - #[test] - fn 
oneshot_single_thread_recv_chan_close() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert_eq!(rx.recv(), Err(RecvError)); - } - - #[test] - fn oneshot_single_thread_send_then_recv() { - let (tx, rx) = sync_channel::>(1); - tx.send(Box::new(10)).unwrap(); - assert!(*rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_open() { - let (tx, rx) = sync_channel::(1); - assert_eq!(tx.try_send(10), Ok(())); - assert!(rx.recv().unwrap() == 10); - } - - #[test] - fn oneshot_single_thread_try_send_closed() { - let (tx, rx) = sync_channel::(0); - drop(rx); - assert_eq!(tx.try_send(10), Err(TrySendError::Disconnected(10))); - } - - #[test] - fn oneshot_single_thread_try_send_closed2() { - let (tx, _rx) = sync_channel::(0); - assert_eq!(tx.try_send(10), Err(TrySendError::Full(10))); - } - - #[test] - fn oneshot_single_thread_try_recv_open() { - let (tx, rx) = sync_channel::(1); - tx.send(10).unwrap(); - assert!(rx.recv() == Ok(10)); - } - - #[test] - fn oneshot_single_thread_try_recv_closed() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert!(rx.recv().is_err()); - } - - #[test] - fn oneshot_single_thread_try_recv_closed_with_data() { - let (tx, rx) = sync_channel::(1); - tx.send(10).unwrap(); - drop(tx); - assert_eq!(rx.try_recv(), Ok(10)); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - } - - #[test] - fn oneshot_single_thread_peek_data() { - let (tx, rx) = sync_channel::(1); - assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); - tx.send(10).unwrap(); - assert_eq!(rx.try_recv(), Ok(10)); - } - - #[test] - fn oneshot_single_thread_peek_close() { - let (tx, rx) = sync_channel::(0); - drop(tx); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - assert_eq!(rx.try_recv(), Err(TryRecvError::Disconnected)); - } - - #[test] - fn oneshot_single_thread_peek_open() { - let (_tx, rx) = sync_channel::(0); - assert_eq!(rx.try_recv(), Err(TryRecvError::Empty)); - } - - #[test] - fn oneshot_multi_task_recv_then_send() 
{ - let (tx, rx) = sync_channel::>(0); - let t = thread::spawn(move || { - assert!(*rx.recv().unwrap() == 10); - }); - - tx.send(Box::new(10)).unwrap(); - t.join().unwrap(); - } - - #[test] - fn oneshot_multi_task_recv_then_close() { - let (tx, rx) = sync_channel::>(0); - let t = thread::spawn(move || { - drop(tx); - }); - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - t.join().unwrap(); - } - - #[test] - fn oneshot_multi_thread_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - drop(tx); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - drop(rx); - }); - ts.push(t); - thread::spawn(move || { - let _ = tx.send(1); - }) - .join() - .unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_recv_close_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - thread::spawn(move || { - assert_eq!(rx.recv(), Err(RecvError)); - }) - .join() - .unwrap(); - }); - ts.push(t); - let t2 = thread::spawn(move || { - thread::spawn(move || { - drop(tx); - }); - }); - ts.push(t2); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn oneshot_multi_thread_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::>(0); - let t = thread::spawn(move || { - tx.send(Box::new(10)).unwrap(); - }); - 
ts.push(t); - assert!(*rx.recv().unwrap() == 10); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn stream_send_recv_stress() { - let stress_factor = stress_factor(); - let mut ts = Vec::with_capacity(2 * stress_factor); - for _ in 0..stress_factor { - let (tx, rx) = sync_channel::>(0); - - if let Some(t) = send(tx, 0) { - ts.push(t); - } - if let Some(t) = recv(rx, 0) { - ts.push(t); - } - - fn send(tx: SyncSender>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - tx.send(Box::new(i)).unwrap(); - send(tx, i + 1); - })) - } - - fn recv(rx: Receiver>, i: i32) -> Option> { - if i == 10 { - return None; - } - - Some(thread::spawn(move || { - assert!(*rx.recv().unwrap() == i); - recv(rx, i + 1); - })) - } - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn recv_a_lot() { - #[cfg(miri)] - const N: usize = 100; - #[cfg(not(miri))] - const N: usize = 10000; - - // Regression test that we don't run out of stack in scheduler context - let (tx, rx) = sync_channel(N); - for _ in 0..N { - tx.send(()).unwrap(); - } - for _ in 0..N { - rx.recv().unwrap(); - } - } - - #[test] - fn shared_chan_stress() { - let (tx, rx) = sync_channel(0); - let total = stress_factor() + 100; - let mut ts = Vec::with_capacity(total); - for _ in 0..total { - let tx = tx.clone(); - let t = thread::spawn(move || { - tx.send(()).unwrap(); - }); - ts.push(t); - } - - for _ in 0..total { - rx.recv().unwrap(); - } - for t in ts { - t.join().unwrap(); - } - } - - #[test] - fn test_nested_recv_iter() { - let (tx, rx) = sync_channel::(0); - let (total_tx, total_rx) = sync_channel::(0); - - let t = thread::spawn(move || { - let mut acc = 0; - for x in rx.iter() { - acc += x; - } - total_tx.send(acc).unwrap(); - }); - - tx.send(3).unwrap(); - tx.send(1).unwrap(); - tx.send(2).unwrap(); - drop(tx); - assert_eq!(total_rx.recv().unwrap(), 6); - t.join().unwrap(); - } - - #[test] - fn test_recv_iter_break() { - let (tx, rx) = sync_channel::(0); 
- let (count_tx, count_rx) = sync_channel(0); - - let t = thread::spawn(move || { - let mut count = 0; - for x in rx.iter() { - if count >= 3 { - break; - } else { - count += x; - } - } - count_tx.send(count).unwrap(); - }); - - tx.send(2).unwrap(); - tx.send(2).unwrap(); - tx.send(2).unwrap(); - let _ = tx.try_send(2); - drop(tx); - assert_eq!(count_rx.recv().unwrap(), 4); - t.join().unwrap(); - } - - #[test] - fn try_recv_states() { - let (tx1, rx1) = sync_channel::(1); - let (tx2, rx2) = sync_channel::<()>(1); - let (tx3, rx3) = sync_channel::<()>(1); - let t = thread::spawn(move || { - rx2.recv().unwrap(); - tx1.send(1).unwrap(); - tx3.send(()).unwrap(); - rx2.recv().unwrap(); - drop(tx1); - tx3.send(()).unwrap(); - }); - - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Ok(1)); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - assert_eq!(rx1.try_recv(), Err(TryRecvError::Disconnected)); - t.join().unwrap(); - } - - // This bug used to end up in a livelock inside of the Receiver destructor - // because the internal state of the Shared packet was corrupted - #[test] - fn destroy_upgraded_shared_port_when_sender_still_active() { - let (tx, rx) = sync_channel::<()>(0); - let (tx2, rx2) = sync_channel::<()>(0); - let t = thread::spawn(move || { - rx.recv().unwrap(); // wait on a oneshot - drop(rx); // destroy a shared - tx2.send(()).unwrap(); - }); - // make sure the other thread has gone to sleep - for _ in 0..5000 { - thread::yield_now(); - } - - // upgrade to a shared chan and send a message - let tx2 = tx.clone(); - drop(tx); - tx2.send(()).unwrap(); - - // wait for the child thread to exit before we exit - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn send1() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - rx.recv().unwrap(); - }); - assert_eq!(tx.send(1), Ok(())); - 
t.join().unwrap(); - } - - #[test] - fn send2() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - drop(rx); - }); - assert!(tx.send(1).is_err()); - t.join().unwrap(); - } - - #[test] - fn send3() { - let (tx, rx) = sync_channel::(1); - assert_eq!(tx.send(1), Ok(())); - let t = thread::spawn(move || { - drop(rx); - }); - assert!(tx.send(1).is_err()); - t.join().unwrap(); - } - - #[test] - fn send4() { - let (tx, rx) = sync_channel::(0); - let tx2 = tx.clone(); - let (done, donerx) = channel(); - let done2 = done.clone(); - let t = thread::spawn(move || { - assert!(tx.send(1).is_err()); - done.send(()).unwrap(); - }); - let t2 = thread::spawn(move || { - assert!(tx2.send(2).is_err()); - done2.send(()).unwrap(); - }); - drop(rx); - donerx.recv().unwrap(); - donerx.recv().unwrap(); - t.join().unwrap(); - t2.join().unwrap(); - } - - #[test] - fn try_send1() { - let (tx, _rx) = sync_channel::(0); - assert_eq!(tx.try_send(1), Err(TrySendError::Full(1))); - } - - #[test] - fn try_send2() { - let (tx, _rx) = sync_channel::(1); - assert_eq!(tx.try_send(1), Ok(())); - assert_eq!(tx.try_send(1), Err(TrySendError::Full(1))); - } - - #[test] - fn try_send3() { - let (tx, rx) = sync_channel::(1); - assert_eq!(tx.try_send(1), Ok(())); - drop(rx); - assert_eq!(tx.try_send(1), Err(TrySendError::Disconnected(1))); - } - - #[test] - fn issue_15761() { - fn repro() { - let (tx1, rx1) = sync_channel::<()>(3); - let (tx2, rx2) = sync_channel::<()>(3); - - let _t = thread::spawn(move || { - rx1.recv().unwrap(); - tx2.try_send(()).unwrap(); - }); - - tx1.try_send(()).unwrap(); - rx2.recv().unwrap(); - } - - for _ in 0..100 { - repro() - } - } -} - -// Source: https://github.com/rust-lang/rust/blob/master/src/libstd/sync/mpsc/select.rs -mod select_tests { - use super::*; - - use std::thread; - - #[test] - fn smoke() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - tx1.send(1).unwrap(); - select! 
{ - foo = rx1.recv() => assert_eq!(foo.unwrap(), 1), - _bar = rx2.recv() => panic!() - } - tx2.send(2).unwrap(); - select! { - _foo = rx1.recv() => panic!(), - bar = rx2.recv() => assert_eq!(bar.unwrap(), 2) - } - drop(tx1); - select! { - foo = rx1.recv() => assert!(foo.is_err()), - _bar = rx2.recv() => panic!() - } - drop(tx2); - select! { - bar = rx2.recv() => assert!(bar.is_err()) - } - } - - #[test] - fn smoke2() { - let (_tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (_tx3, rx3) = channel::(); - let (_tx4, rx4) = channel::(); - let (tx5, rx5) = channel::(); - tx5.send(4).unwrap(); - select! { - _foo = rx1.recv() => panic!("1"), - _foo = rx2.recv() => panic!("2"), - _foo = rx3.recv() => panic!("3"), - _foo = rx4.recv() => panic!("4"), - foo = rx5.recv() => assert_eq!(foo.unwrap(), 4) - } - } - - #[test] - fn closed() { - let (_tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - drop(tx2); - - select! { - _a1 = rx1.recv() => panic!(), - a2 = rx2.recv() => assert!(a2.is_err()) - } - } - - #[test] - fn unblocks() { - let (tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (tx3, rx3) = channel::(); - - let t = thread::spawn(move || { - for _ in 0..20 { - thread::yield_now(); - } - tx1.send(1).unwrap(); - rx3.recv().unwrap(); - for _ in 0..20 { - thread::yield_now(); - } - }); - - select! { - a = rx1.recv() => assert_eq!(a.unwrap(), 1), - _b = rx2.recv() => panic!() - } - tx3.send(1).unwrap(); - select! { - a = rx1.recv() => assert!(a.is_err()), - _b = rx2.recv() => panic!() - } - t.join().unwrap(); - } - - #[test] - fn both_ready() { - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - for _ in 0..20 { - thread::yield_now(); - } - tx1.send(1).unwrap(); - tx2.send(2).unwrap(); - rx3.recv().unwrap(); - }); - - select! { - a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, - a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } - } - select! 
{ - a = rx1.recv() => { assert_eq!(a.unwrap(), 1); }, - a = rx2.recv() => { assert_eq!(a.unwrap(), 2); } - } - assert_eq!(rx1.try_recv(), Err(TryRecvError::Empty)); - assert_eq!(rx2.try_recv(), Err(TryRecvError::Empty)); - tx3.send(()).unwrap(); - t.join().unwrap(); - } - - #[test] - fn stress() { - #[cfg(miri)] - const AMT: i32 = 100; - #[cfg(not(miri))] - const AMT: i32 = 10000; - - let (tx1, rx1) = channel::(); - let (tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - for i in 0..AMT { - if i % 2 == 0 { - tx1.send(i).unwrap(); - } else { - tx2.send(i).unwrap(); - } - rx3.recv().unwrap(); - } - }); - - for i in 0..AMT { - select! { - i1 = rx1.recv() => { assert!(i % 2 == 0 && i == i1.unwrap()); }, - i2 = rx2.recv() => { assert!(i % 2 == 1 && i == i2.unwrap()); } - } - tx3.send(()).unwrap(); - } - t.join().unwrap(); - } - - #[allow(unused_must_use)] - #[test] - fn cloning() { - let (tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - rx3.recv().unwrap(); - tx1.clone(); - assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); - tx1.send(2).unwrap(); - rx3.recv().unwrap(); - }); - - tx3.send(()).unwrap(); - select! { - _i1 = rx1.recv() => {}, - _i2 = rx2.recv() => panic!() - } - tx3.send(()).unwrap(); - t.join().unwrap(); - } - - #[allow(unused_must_use)] - #[test] - fn cloning2() { - let (tx1, rx1) = channel::(); - let (_tx2, rx2) = channel::(); - let (tx3, rx3) = channel::<()>(); - - let t = thread::spawn(move || { - rx3.recv().unwrap(); - tx1.clone(); - assert_eq!(rx3.try_recv(), Err(TryRecvError::Empty)); - tx1.send(2).unwrap(); - rx3.recv().unwrap(); - }); - - tx3.send(()).unwrap(); - select! 
{ - _i1 = rx1.recv() => {}, - _i2 = rx2.recv() => panic!() - } - tx3.send(()).unwrap(); - t.join().unwrap(); - } - - #[test] - fn cloning3() { - let (tx1, rx1) = channel::<()>(); - let (tx2, rx2) = channel::<()>(); - let (tx3, rx3) = channel::<()>(); - let t = thread::spawn(move || { - select! { - _ = rx1.recv() => panic!(), - _ = rx2.recv() => {} - } - tx3.send(()).unwrap(); - }); - - for _ in 0..1000 { - thread::yield_now(); - } - drop(tx1.clone()); - tx2.send(()).unwrap(); - rx3.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn preflight1() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - select! { - _n = rx.recv() => {} - } - } - - #[test] - fn preflight2() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - tx.send(()).unwrap(); - select! { - _n = rx.recv() => {} - } - } - - #[test] - fn preflight3() { - let (tx, rx) = channel(); - drop(tx.clone()); - tx.send(()).unwrap(); - select! { - _n = rx.recv() => {} - } - } - - #[test] - fn preflight4() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight5() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - tx.send(()).unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight6() { - let (tx, rx) = channel(); - drop(tx.clone()); - tx.send(()).unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight7() { - let (tx, rx) = channel::<()>(); - drop(tx); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight8() { - let (tx, rx) = channel(); - tx.send(()).unwrap(); - drop(tx); - rx.recv().unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn preflight9() { - let (tx, rx) = channel(); - drop(tx.clone()); - tx.send(()).unwrap(); - drop(tx); - rx.recv().unwrap(); - select! { - _ = rx.recv() => {} - } - } - - #[test] - fn oneshot_data_waiting() { - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - let t = thread::spawn(move || { - select! 
{ - _n = rx1.recv() => {} - } - tx2.send(()).unwrap(); - }); - - for _ in 0..100 { - thread::yield_now() - } - tx1.send(()).unwrap(); - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn stream_data_waiting() { - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - tx1.send(()).unwrap(); - tx1.send(()).unwrap(); - rx1.recv().unwrap(); - rx1.recv().unwrap(); - let t = thread::spawn(move || { - select! { - _n = rx1.recv() => {} - } - tx2.send(()).unwrap(); - }); - - for _ in 0..100 { - thread::yield_now() - } - tx1.send(()).unwrap(); - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn shared_data_waiting() { - let (tx1, rx1) = channel(); - let (tx2, rx2) = channel(); - drop(tx1.clone()); - tx1.send(()).unwrap(); - rx1.recv().unwrap(); - let t = thread::spawn(move || { - select! { - _n = rx1.recv() => {} - } - tx2.send(()).unwrap(); - }); - - for _ in 0..100 { - thread::yield_now() - } - tx1.send(()).unwrap(); - rx2.recv().unwrap(); - t.join().unwrap(); - } - - #[test] - fn sync1() { - let (tx, rx) = sync_channel::(1); - tx.send(1).unwrap(); - select! { - n = rx.recv() => { assert_eq!(n.unwrap(), 1); } - } - } - - #[test] - fn sync2() { - let (tx, rx) = sync_channel::(0); - let t = thread::spawn(move || { - for _ in 0..100 { - thread::yield_now() - } - tx.send(1).unwrap(); - }); - select! { - n = rx.recv() => { assert_eq!(n.unwrap(), 1); } - } - t.join().unwrap(); - } - - #[test] - fn sync3() { - let (tx1, rx1) = sync_channel::(0); - let (tx2, rx2): (Sender, Receiver) = channel(); - let t = thread::spawn(move || { - tx1.send(1).unwrap(); - }); - let t2 = thread::spawn(move || { - tx2.send(2).unwrap(); - }); - select! 
{ - n = rx1.recv() => { - let n = n.unwrap(); - assert_eq!(n, 1); - assert_eq!(rx2.recv().unwrap(), 2); - }, - n = rx2.recv() => { - let n = n.unwrap(); - assert_eq!(n, 2); - assert_eq!(rx1.recv().unwrap(), 1); - } - } - t.join().unwrap(); - t2.join().unwrap(); - } -} diff --git a/crossbeam-channel/tests/never.rs b/crossbeam-channel/tests/never.rs deleted file mode 100644 index f275126f7..000000000 --- a/crossbeam-channel/tests/never.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! Tests for the never channel flavor. - -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{never, select, tick, unbounded}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - select! { - recv(never::()) -> _ => panic!(), - default => {} - } -} - -#[test] -fn optional() { - let (s, r) = unbounded::(); - s.send(1).unwrap(); - s.send(2).unwrap(); - - let mut r = Some(&r); - select! { - recv(r.unwrap_or(&never())) -> _ => {} - default => panic!(), - } - - r = None; - select! { - recv(r.unwrap_or(&never())) -> _ => panic!(), - default => {} - } -} - -#[test] -fn tick_n() { - let mut r = tick(ms(100)); - let mut step = 0; - - loop { - select! 
{ - recv(r) -> _ => step += 1, - default(ms(500)) => break, - } - - if step == 10 { - r = never(); - } - } - - assert_eq!(step, 10); -} - -#[test] -fn capacity() { - let r = never::(); - assert_eq!(r.capacity(), Some(0)); -} - -#[test] -fn len_empty_full() { - let r = never::(); - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(r.is_full()); -} - -#[test] -fn try_recv() { - let r = never::(); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(100)); - assert!(r.try_recv().is_err()); -} - -#[test] -fn recv_timeout() { - let start = Instant::now(); - let r = never::(); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(100)); - assert!(now - start <= ms(150)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(200)); - assert!(now - start <= ms(250)); -} diff --git a/crossbeam-channel/tests/ready.rs b/crossbeam-channel/tests/ready.rs deleted file mode 100644 index ca84f869c..000000000 --- a/crossbeam-channel/tests/ready.rs +++ /dev/null @@ -1,851 +0,0 @@ -//! Tests for channel readiness using the `Select` struct. 
- -use std::any::Any; -use std::cell::Cell; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, bounded, tick, unbounded}; -use crossbeam_channel::{Receiver, Select, TryRecvError, TrySendError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke1() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - assert_eq!(sel.ready(), 0); - assert_eq!(r1.try_recv(), Ok(1)); - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - assert_eq!(sel.ready(), 1); - assert_eq!(r2.try_recv(), Ok(2)); -} - -#[test] -fn smoke2() { - let (_s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (_s3, r3) = unbounded::(); - let (_s4, r4) = unbounded::(); - let (s5, r5) = unbounded::(); - - s5.send(5).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&r3); - sel.recv(&r4); - sel.recv(&r5); - assert_eq!(sel.ready(), 4); - assert_eq!(r5.try_recv(), Ok(5)); -} - -#[test] -fn disconnected() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - drop(s1); - thread::sleep(ms(500)); - s2.send(5).unwrap(); - }); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - r2.recv().unwrap(); - }) - .unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - drop(s2); - }); - - let mut sel = Select::new(); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r2.try_recv(), 
Err(TryRecvError::Disconnected)), - _ => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn default() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - assert!(sel.try_ready().is_err()); - - drop(s1); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.try_ready() { - Ok(0) => assert!(r1.try_recv().is_err()), - _ => panic!(), - } - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r2); - match sel.try_ready() { - Ok(0) => assert_eq!(r2.try_recv(), Ok(2)), - _ => panic!(), - } - - let mut sel = Select::new(); - sel.recv(&r2); - assert!(sel.try_ready().is_err()); - - let mut sel = Select::new(); - assert!(sel.try_ready().is_err()); -} - -#[test] -fn timeout() { - let (_s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(1500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - assert!(sel.ready_timeout(ms(1000)).is_err()); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(1) => assert_eq!(r2.try_recv(), Ok(2)), - _ => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - let (s, r) = unbounded::(); - - scope.spawn(move |_| { - thread::sleep(ms(500)); - drop(s); - }); - - let mut sel = Select::new(); - assert!(sel.ready_timeout(ms(1000)).is_err()); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.try_ready() { - Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn default_when_disconnected() { - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.try_ready() { - Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - sel.recv(&r); - match 
sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } - - let (s, _) = bounded::(0); - - let mut sel = Select::new(); - sel.send(&s); - match sel.try_ready() { - Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))), - _ => panic!(), - } - - let (s, _) = bounded::(0); - - let mut sel = Select::new(); - sel.send(&s); - match sel.ready_timeout(ms(1000)) { - Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))), - _ => panic!(), - } -} - -#[test] -#[cfg_attr(miri, ignore)] // this test makes timing assumptions, but Miri is so slow it violates them -fn default_only() { - let start = Instant::now(); - - let mut sel = Select::new(); - assert!(sel.try_ready().is_err()); - let now = Instant::now(); - assert!(now - start <= ms(50)); - - let start = Instant::now(); - let mut sel = Select::new(); - assert!(sel.ready_timeout(ms(500)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(450)); - assert!(now - start <= ms(550)); -} - -#[test] -fn unblocks() { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready_timeout(ms(1000)) { - Ok(1) => assert_eq!(r2.try_recv(), Ok(2)), - _ => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - assert_eq!(r1.recv().unwrap(), 1); - }); - - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper2 = sel.send(&s2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => oper.send(&s1, 1).unwrap(), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - }) - .unwrap(); -} - -#[test] -fn both_ready() { - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - 
thread::sleep(ms(500)); - s1.send(1).unwrap(); - assert_eq!(r2.recv().unwrap(), 2); - }); - - for _ in 0..2 { - let mut sel = Select::new(); - sel.recv(&r1); - sel.send(&s2); - match sel.ready() { - 0 => assert_eq!(r1.try_recv(), Ok(1)), - 1 => s2.try_send(2).unwrap(), - _ => panic!(), - } - } - }) - .unwrap(); -} - -#[test] -fn cloning1() { - scope(|scope| { - let (s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (s3, r3) = unbounded::<()>(); - - scope.spawn(move |_| { - r3.recv().unwrap(); - drop(s1.clone()); - assert!(r3.try_recv().is_err()); - s1.send(1).unwrap(); - r3.recv().unwrap(); - }); - - s3.send(()).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready() { - 0 => drop(r1.try_recv()), - 1 => drop(r2.try_recv()), - _ => panic!(), - } - - s3.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn cloning2() { - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (_s3, _r3) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready() { - 0 => panic!(), - 1 => drop(r2.try_recv()), - _ => panic!(), - } - }); - - thread::sleep(ms(500)); - drop(s1.clone()); - s2.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn preflight1() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => drop(r.try_recv()), - _ => panic!(), - } -} - -#[test] -fn preflight2() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => assert_eq!(r.try_recv(), Ok(())), - _ => panic!(), - } - - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); -} - -#[test] -fn preflight3() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - r.recv().unwrap(); - - let mut sel = Select::new(); - sel.recv(&r); - match 
sel.ready() { - 0 => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)), - _ => panic!(), - } -} - -#[test] -fn duplicate_operations() { - let (s, r) = unbounded::(); - let hit = vec![Cell::new(false); 4]; - - while hit.iter().map(|h| h.get()).any(|hit| !hit) { - let mut sel = Select::new(); - sel.recv(&r); - sel.recv(&r); - sel.send(&s); - sel.send(&s); - match sel.ready() { - 0 => { - assert!(r.try_recv().is_ok()); - hit[0].set(true); - } - 1 => { - assert!(r.try_recv().is_ok()); - hit[1].set(true); - } - 2 => { - assert!(s.try_send(0).is_ok()); - hit[2].set(true); - } - 3 => { - assert!(s.try_send(0).is_ok()); - hit[3].set(true); - } - _ => panic!(), - } - } -} - -#[test] -fn nesting() { - let (s, r) = unbounded::(); - - let mut sel = Select::new(); - sel.send(&s); - match sel.ready() { - 0 => { - assert!(s.try_send(0).is_ok()); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => { - assert_eq!(r.try_recv(), Ok(0)); - - let mut sel = Select::new(); - sel.send(&s); - match sel.ready() { - 0 => { - assert!(s.try_send(1).is_ok()); - - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => { - assert_eq!(r.try_recv(), Ok(1)); - } - _ => panic!(), - } - } - _ => panic!(), - } - } - _ => panic!(), - } - } - _ => panic!(), - } -} - -#[test] -fn stress_recv() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded(); - let (s2, r2) = bounded(5); - let (s3, r3) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - r3.recv().unwrap(); - - s2.send(i).unwrap(); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - match sel.ready() { - 0 => assert_eq!(r1.try_recv(), Ok(i)), - 1 => assert_eq!(r2.try_recv(), Ok(i)), - _ => panic!(), - } - - s3.send(()).unwrap(); - } - } - }) - .unwrap(); -} - -#[test] -fn stress_send() { - 
#[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r1.recv().unwrap(), i); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - sel.send(&s1); - sel.send(&s2); - match sel.ready() { - 0 => assert!(s1.try_send(i).is_ok()), - 1 => assert!(s2.try_send(i).is_ok()), - _ => panic!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_mixed() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - sel.recv(&r1); - sel.send(&s2); - match sel.ready() { - 0 => assert_eq!(r1.try_recv(), Ok(i)), - 1 => assert!(s2.try_send(i).is_ok()), - _ => panic!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 20; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - let mut sel = Select::new(); - sel.send(&s); - match sel.ready_timeout(ms(100)) { - Err(_) => {} - Ok(0) => { - assert!(s.try_send(i).is_ok()); - break; - } - Ok(_) => panic!(), - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready_timeout(ms(100)) { - Err(_) => {} - Ok(0) => { - assert_eq!(r.try_recv(), Ok(i)); - break; - } - Ok(_) => panic!(), - } - 
} - } - }); - }) - .unwrap(); -} - -#[test] -fn send_recv_same_channel() { - let (s, r) = bounded::(0); - let mut sel = Select::new(); - sel.send(&s); - sel.recv(&r); - assert!(sel.ready_timeout(ms(100)).is_err()); - - let (s, r) = unbounded::(); - let mut sel = Select::new(); - sel.send(&s); - sel.recv(&r); - match sel.ready_timeout(ms(100)) { - Err(_) => panic!(), - Ok(0) => assert!(s.try_send(0).is_ok()), - Ok(_) => panic!(), - } -} - -#[test] -fn channel_through_channel() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - type T = Box; - - for cap in 1..4 { - let (s, r) = bounded::(cap); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(cap); - let new_r: T = Box::new(Some(new_r)); - - { - let mut sel = Select::new(); - sel.send(&s); - match sel.ready() { - 0 => assert!(s.try_send(new_r).is_ok()), - _ => panic!(), - } - } - - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - let new = { - let mut sel = Select::new(); - sel.recv(&r); - match sel.ready() { - 0 => r - .try_recv() - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap(), - _ => panic!(), - } - }; - r = new; - } - }); - }) - .unwrap(); - } -} - -#[test] -fn fairness1() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let hits = vec![Cell::new(0usize); 4]; - for _ in 0..COUNT { - let after = after(ms(0)); - let tick = tick(ms(0)); - - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&after); - sel.recv(&tick); - match sel.ready() { - 0 => { - r1.try_recv().unwrap(); - hits[0].set(hits[0].get() + 1); - } - 1 => { - r2.try_recv().unwrap(); - hits[1].set(hits[1].get() + 1); - } - 2 => { - after.try_recv().unwrap(); 
- hits[2].set(hits[2].get() + 1); - } - 3 => { - tick.try_recv().unwrap(); - hits[3].set(hits[3].get() + 1); - } - _ => panic!(), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness2() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = bounded::<()>(1); - let (s3, r3) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - let mut sel = Select::new(); - let mut oper1 = None; - let mut oper2 = None; - if s1.is_empty() { - oper1 = Some(sel.send(&s1)); - } - if s2.is_empty() { - oper2 = Some(sel.send(&s2)); - } - let oper3 = sel.send(&s3); - let oper = sel.select(); - match oper.index() { - i if Some(i) == oper1 => assert!(oper.send(&s1, ()).is_ok()), - i if Some(i) == oper2 => assert!(oper.send(&s2, ()).is_ok()), - i if i == oper3 => assert!(oper.send(&s3, ()).is_ok()), - _ => unreachable!(), - } - } - }); - - let hits = vec![Cell::new(0usize); 3]; - for _ in 0..COUNT { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&r3); - loop { - match sel.ready() { - 0 => { - if r1.try_recv().is_ok() { - hits[0].set(hits[0].get() + 1); - break; - } - } - 1 => { - if r2.try_recv().is_ok() { - hits[1].set(hits[1].get() + 1); - break; - } - } - 2 => { - if r3.try_recv().is_ok() { - hits[2].set(hits[2].get() + 1); - break; - } - } - _ => unreachable!(), - } - } - } - assert!(hits.iter().all(|x| x.get() > 0)); - }) - .unwrap(); -} diff --git a/crossbeam-channel/tests/same_channel.rs b/crossbeam-channel/tests/same_channel.rs deleted file mode 100644 index e86e7c5a0..000000000 --- a/crossbeam-channel/tests/same_channel.rs +++ /dev/null @@ -1,114 +0,0 @@ -#![allow(clippy::redundant_clone)] - -use std::time::Duration; - -use crossbeam_channel::{after, bounded, never, tick, unbounded}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn after_same_channel() { 
- let r = after(ms(50)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let r3 = after(ms(50)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); - - let r4 = after(ms(100)); - assert!(!r.same_channel(&r4)); - assert!(!r2.same_channel(&r4)); -} - -#[test] -fn array_same_channel() { - let (s, r) = bounded::(1); - - let s2 = s.clone(); - assert!(s.same_channel(&s2)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let (s3, r3) = bounded::(1); - assert!(!s.same_channel(&s3)); - assert!(!s2.same_channel(&s3)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); -} - -#[test] -fn list_same_channel() { - let (s, r) = unbounded::(); - - let s2 = s.clone(); - assert!(s.same_channel(&s2)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let (s3, r3) = unbounded::(); - assert!(!s.same_channel(&s3)); - assert!(!s2.same_channel(&s3)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); -} - -#[test] -fn never_same_channel() { - let r = never::(); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - // Never channel are always equal to one another. 
- let r3 = never::(); - assert!(r.same_channel(&r3)); - assert!(r2.same_channel(&r3)); -} - -#[test] -fn tick_same_channel() { - let r = tick(ms(50)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let r3 = tick(ms(50)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); - - let r4 = tick(ms(100)); - assert!(!r.same_channel(&r4)); - assert!(!r2.same_channel(&r4)); -} - -#[test] -fn zero_same_channel() { - let (s, r) = bounded::(0); - - let s2 = s.clone(); - assert!(s.same_channel(&s2)); - - let r2 = r.clone(); - assert!(r.same_channel(&r2)); - - let (s3, r3) = bounded::(0); - assert!(!s.same_channel(&s3)); - assert!(!s2.same_channel(&s3)); - assert!(!r.same_channel(&r3)); - assert!(!r2.same_channel(&r3)); -} - -#[test] -fn different_flavors_same_channel() { - let (s1, r1) = bounded::(0); - let (s2, r2) = unbounded::(); - - assert!(!s1.same_channel(&s2)); - assert!(!r1.same_channel(&r2)); -} diff --git a/crossbeam-channel/tests/select.rs b/crossbeam-channel/tests/select.rs deleted file mode 100644 index 38178404d..000000000 --- a/crossbeam-channel/tests/select.rs +++ /dev/null @@ -1,1326 +0,0 @@ -//! Tests for channel selection using the `Select` struct. 
- -use std::any::Any; -use std::cell::Cell; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, bounded, tick, unbounded, Receiver, Select, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke1() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), - i if i == oper2 => panic!(), - _ => unreachable!(), - } - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - } -} - -#[test] -fn smoke2() { - let (_s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (_s3, r3) = unbounded::(); - let (_s4, r4) = unbounded::(); - let (s5, r5) = unbounded::(); - - s5.send(5).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper3 = sel.recv(&r3); - let oper4 = sel.recv(&r4); - let oper5 = sel.recv(&r5); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => panic!(), - i if i == oper3 => panic!(), - i if i == oper4 => panic!(), - i if i == oper5 => assert_eq!(oper.recv(&r5), Ok(5)), - _ => unreachable!(), - } -} - -#[test] -fn disconnected() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - drop(s1); - thread::sleep(ms(500)); - s2.send(5).unwrap(); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match 
oper.index() { - i if i == oper1 => assert!(oper.recv(&r1).is_err()), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - r2.recv().unwrap(); - }) - .unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r1).is_err()), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - drop(s2); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r2).is_err()), - _ => unreachable!(), - }, - } - }) - .unwrap(); -} - -#[test] -fn default() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - let mut sel = Select::new(); - let _oper1 = sel.recv(&r1); - let _oper2 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(_) => panic!(), - } - - drop(s1); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r1).is_err()), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - s2.send(2).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - }, - } - - let mut sel = Select::new(); - let _oper1 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(_) => panic!(), - } - - let mut sel = Select::new(); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(_) => 
panic!(), - } -} - -#[test] -fn timeout() { - let (_s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(1500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - }, - } - }) - .unwrap(); - - scope(|scope| { - let (s, r) = unbounded::(); - - scope.spawn(move |_| { - thread::sleep(ms(500)); - drop(s); - }); - - let mut sel = Select::new(); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - }, - } - } - Ok(_) => unreachable!(), - } - }) - .unwrap(); -} - -#[test] -fn default_when_disconnected() { - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - }, - } - - let (_, r) = unbounded::(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - }, - } - - let 
(s, _) = bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.try_select(); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.send(&s, 0).is_err()), - _ => unreachable!(), - }, - } - - let (s, _) = bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => assert!(oper.send(&s, 0).is_err()), - _ => unreachable!(), - }, - } -} - -#[test] -fn default_only() { - let start = Instant::now(); - - let mut sel = Select::new(); - let oper = sel.try_select(); - assert!(oper.is_err()); - let now = Instant::now(); - assert!(now - start <= ms(50)); - - let start = Instant::now(); - let mut sel = Select::new(); - let oper = sel.select_timeout(ms(500)); - assert!(oper.is_err()); - let now = Instant::now(); - assert!(now - start >= ms(450)); - assert!(now - start <= ms(550)); -} - -#[test] -fn unblocks() { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s2.send(2).unwrap(); - }); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => assert_eq!(oper.recv(&r2), Ok(2)), - _ => unreachable!(), - }, - } - }) - .unwrap(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - assert_eq!(r1.recv().unwrap(), 1); - }); - - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper2 = sel.send(&s2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - i if i == oper1 => oper.send(&s1, 1).unwrap(), - i if i == oper2 => panic!(), - _ => unreachable!(), - }, - } - }) - .unwrap(); -} - 
-#[test] -fn both_ready() { - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s1.send(1).unwrap(); - assert_eq!(r2.recv().unwrap(), 2); - }); - - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), - i if i == oper2 => oper.send(&s2, 2).unwrap(), - _ => unreachable!(), - } - } - }) - .unwrap(); -} - -#[test] -fn loop_try() { - const RUNS: usize = 20; - - for _ in 0..RUNS { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - let (s_end, r_end) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| loop { - let mut done = false; - - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => { - let _ = oper.send(&s1, 1); - done = true; - } - _ => unreachable!(), - }, - } - if done { - break; - } - - let mut sel = Select::new(); - let oper1 = sel.recv(&r_end); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => { - let _ = oper.recv(&r_end); - done = true; - } - _ => unreachable!(), - }, - } - if done { - break; - } - }); - - scope.spawn(|_| loop { - if let Ok(x) = r2.try_recv() { - assert_eq!(x, 2); - break; - } - - let mut done = false; - let mut sel = Select::new(); - let oper1 = sel.recv(&r_end); - let oper = sel.try_select(); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - i if i == oper1 => { - let _ = oper.recv(&r_end); - done = true; - } - _ => unreachable!(), - }, - } - if done { - break; - } - }); - - scope.spawn(|_| { - thread::sleep(ms(500)); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - let oper = sel.select_timeout(ms(1000)); - match oper { - Err(_) => {} - Ok(oper) 
=> match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r1), Ok(1)), - i if i == oper2 => assert!(oper.send(&s2, 2).is_ok()), - _ => unreachable!(), - }, - } - - drop(s_end); - }); - }) - .unwrap(); - } -} - -#[test] -fn cloning1() { - scope(|scope| { - let (s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (s3, r3) = unbounded::<()>(); - - scope.spawn(move |_| { - r3.recv().unwrap(); - drop(s1.clone()); - assert!(r3.try_recv().is_err()); - s1.send(1).unwrap(); - r3.recv().unwrap(); - }); - - s3.send(()).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => drop(oper.recv(&r1)), - i if i == oper2 => drop(oper.recv(&r2)), - _ => unreachable!(), - } - - s3.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn cloning2() { - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (_s3, _r3) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => panic!(), - i if i == oper2 => drop(oper.recv(&r2)), - _ => unreachable!(), - } - }); - - thread::sleep(ms(500)); - drop(s1.clone()); - s2.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn preflight1() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => drop(oper.recv(&r)), - _ => unreachable!(), - } -} - -#[test] -fn preflight2() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert_eq!(oper.recv(&r), Ok(())), - _ => unreachable!(), - } - - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); 
-} - -#[test] -fn preflight3() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - r.recv().unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => assert!(oper.recv(&r).is_err()), - _ => unreachable!(), - } -} - -#[test] -fn duplicate_operations() { - let (s, r) = unbounded::(); - let hit = vec![Cell::new(false); 4]; - - while hit.iter().map(|h| h.get()).any(|hit| !hit) { - let mut sel = Select::new(); - let oper0 = sel.recv(&r); - let oper1 = sel.recv(&r); - let oper2 = sel.send(&s); - let oper3 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - i if i == oper0 => { - assert!(oper.recv(&r).is_ok()); - hit[0].set(true); - } - i if i == oper1 => { - assert!(oper.recv(&r).is_ok()); - hit[1].set(true); - } - i if i == oper2 => { - assert!(oper.send(&s, 0).is_ok()); - hit[2].set(true); - } - i if i == oper3 => { - assert!(oper.send(&s, 0).is_ok()); - hit[3].set(true); - } - _ => unreachable!(), - } - } -} - -#[test] -fn nesting() { - let (s, r) = unbounded::(); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert!(oper.send(&s, 0).is_ok()); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert_eq!(oper.recv(&r), Ok(0)); - - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert!(oper.send(&s, 1).is_ok()); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - assert_eq!(oper.recv(&r), Ok(1)); - } - _ => unreachable!(), - } - } - _ => unreachable!(), - } - } - _ => unreachable!(), - } - } - _ => unreachable!(), - } -} - -#[test] -fn stress_recv() { - #[cfg(miri)] - const COUNT: usize = 50; - 
#[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded(); - let (s2, r2) = bounded(5); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - r3.recv().unwrap(); - - s2.send(i).unwrap(); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), - ix if ix == oper2 => assert_eq!(oper.recv(&r2), Ok(i)), - _ => unreachable!(), - } - - s3.send(()).unwrap(); - } - } - }) - .unwrap(); -} - -#[test] -fn stress_send() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r1.recv().unwrap(), i); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.send(&s1); - let oper2 = sel.send(&s2); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert!(oper.send(&s1, i).is_ok()), - ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), - _ => unreachable!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_mixed() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - let oper = sel.select(); - match oper.index() { - 
ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), - _ => unreachable!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 20; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select_timeout(ms(100)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - ix if ix == oper1 => { - assert!(oper.send(&s, i).is_ok()); - break; - } - _ => unreachable!(), - }, - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select_timeout(ms(100)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - ix if ix == oper1 => { - assert_eq!(oper.recv(&r), Ok(i)); - break; - } - _ => unreachable!(), - }, - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn send_recv_same_channel() { - let (s, r) = bounded::(0); - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper2 = sel.recv(&r); - let oper = sel.select_timeout(ms(100)); - match oper { - Err(_) => {} - Ok(oper) => match oper.index() { - ix if ix == oper1 => panic!(), - ix if ix == oper2 => panic!(), - _ => unreachable!(), - }, - } - - let (s, r) = unbounded::(); - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper2 = sel.recv(&r); - let oper = sel.select_timeout(ms(100)); - match oper { - Err(_) => panic!(), - Ok(oper) => match oper.index() { - ix if ix == oper1 => assert!(oper.send(&s, 0).is_ok()), - ix if ix == oper2 => panic!(), - _ => unreachable!(), - }, - } -} - -#[test] -fn matching() { - const THREADS: usize = 44; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - 
let mut sel = Select::new(); - let oper1 = sel.recv(r); - let oper2 = sel.send(s); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(s, i).is_ok()), - _ => unreachable!(), - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn matching_with_leftover() { - const THREADS: usize = 55; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - let mut sel = Select::new(); - let oper1 = sel.recv(r); - let oper2 = sel.send(s); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(s, i).is_ok()), - _ => unreachable!(), - } - }); - } - s.send(!0).unwrap(); - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn channel_through_channel() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - type T = Box; - - for cap in 0..3 { - let (s, r) = bounded::(cap); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(cap); - let new_r: T = Box::new(Some(new_r)); - - { - let mut sel = Select::new(); - let oper1 = sel.send(&s); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert!(oper.send(&s, new_r).is_ok()), - _ => unreachable!(), - } - } - - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - let new = { - let mut sel = Select::new(); - let oper1 = sel.recv(&r); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => oper - .recv(&r) - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap(), - _ => unreachable!(), - } - }; - r = new; - } - }); - }) - .unwrap(); - } -} - -#[test] -fn linearizable_try() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - 
const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.try_select(); - match oper { - Err(_) => unreachable!(), - Ok(oper) => match oper.index() { - ix if ix == oper1 => assert!(oper.recv(&r1).is_ok()), - ix if ix == oper2 => assert!(oper.recv(&r2).is_ok()), - _ => unreachable!(), - }, - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - } -} - -#[test] -fn linearizable_timeout() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper = sel.select_timeout(ms(0)); - match oper { - Err(_) => unreachable!(), - Ok(oper) => match oper.index() { - ix if ix == oper1 => assert!(oper.recv(&r1).is_ok()), - ix if ix == oper2 => assert!(oper.recv(&r2).is_ok()), - _ => unreachable!(), - }, - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - 
} -} - -#[test] -fn fairness1() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let hits = vec![Cell::new(0usize); 4]; - for _ in 0..COUNT { - let after = after(ms(0)); - let tick = tick(ms(0)); - - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper3 = sel.recv(&after); - let oper4 = sel.recv(&tick); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - oper.recv(&r1).unwrap(); - hits[0].set(hits[0].get() + 1); - } - i if i == oper2 => { - oper.recv(&r2).unwrap(); - hits[1].set(hits[1].get() + 1); - } - i if i == oper3 => { - oper.recv(&after).unwrap(); - hits[2].set(hits[2].get() + 1); - } - i if i == oper4 => { - oper.recv(&tick).unwrap(); - hits[3].set(hits[3].get() + 1); - } - _ => unreachable!(), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 2)); -} - -#[test] -fn fairness2() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = bounded::<()>(1); - let (s3, r3) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - let mut sel = Select::new(); - let mut oper1 = None; - let mut oper2 = None; - if s1.is_empty() { - oper1 = Some(sel.send(&s1)); - } - if s2.is_empty() { - oper2 = Some(sel.send(&s2)); - } - let oper3 = sel.send(&s3); - let oper = sel.select(); - match oper.index() { - i if Some(i) == oper1 => assert!(oper.send(&s1, ()).is_ok()), - i if Some(i) == oper2 => assert!(oper.send(&s2, ()).is_ok()), - i if i == oper3 => assert!(oper.send(&s3, ()).is_ok()), - _ => unreachable!(), - } - } - }); - - let hits = vec![Cell::new(0usize); 3]; - for _ in 0..COUNT { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); 
- let oper3 = sel.recv(&r3); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - oper.recv(&r1).unwrap(); - hits[0].set(hits[0].get() + 1); - } - i if i == oper2 => { - oper.recv(&r2).unwrap(); - hits[1].set(hits[1].get() + 1); - } - i if i == oper3 => { - oper.recv(&r3).unwrap(); - hits[2].set(hits[2].get() + 1); - } - _ => unreachable!(), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 50)); - }) - .unwrap(); -} - -#[test] -fn sync_and_clone() { - const THREADS: usize = 20; - - let (s, r) = &bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.recv(r); - let oper2 = sel.send(s); - let sel = &sel; - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - let mut sel = sel.clone(); - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(s, i).is_ok()), - _ => unreachable!(), - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn send_and_clone() { - const THREADS: usize = 20; - - let (s, r) = &bounded::(0); - - let mut sel = Select::new(); - let oper1 = sel.recv(r); - let oper2 = sel.send(s); - - scope(|scope| { - for i in 0..THREADS { - let mut sel = sel.clone(); - scope.spawn(move |_| { - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_ne!(oper.recv(r), Ok(i)), - ix if ix == oper2 => assert!(oper.send(s, i).is_ok()), - _ => unreachable!(), - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn reuse() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - let mut sel = Select::new(); - let 
oper1 = sel.recv(&r1); - let oper2 = sel.send(&s2); - - for i in 0..COUNT { - for _ in 0..2 { - let oper = sel.select(); - match oper.index() { - ix if ix == oper1 => assert_eq!(oper.recv(&r1), Ok(i)), - ix if ix == oper2 => assert!(oper.send(&s2, i).is_ok()), - _ => unreachable!(), - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} diff --git a/crossbeam-channel/tests/select_macro.rs b/crossbeam-channel/tests/select_macro.rs deleted file mode 100644 index 8fb72773d..000000000 --- a/crossbeam-channel/tests/select_macro.rs +++ /dev/null @@ -1,1597 +0,0 @@ -//! Tests for the `select!` macro. - -#![forbid(unsafe_code)] // select! is safe. -#![allow(clippy::match_single_binding)] - -use std::any::Any; -use std::cell::Cell; -use std::ops::Deref; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, bounded, never, select, select_biased, tick, unbounded}; -use crossbeam_channel::{Receiver, RecvError, SendError, Sender, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke1() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - s1.send(1).unwrap(); - - select! { - recv(r1) -> v => assert_eq!(v, Ok(1)), - recv(r2) -> _ => panic!(), - } - - s2.send(2).unwrap(); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> v => assert_eq!(v, Ok(2)), - } -} - -#[test] -fn smoke2() { - let (_s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (_s3, r3) = unbounded::(); - let (_s4, r4) = unbounded::(); - let (s5, r5) = unbounded::(); - - s5.send(5).unwrap(); - - select! 
{ - recv(r1) -> _ => panic!(), - recv(r2) -> _ => panic!(), - recv(r3) -> _ => panic!(), - recv(r4) -> _ => panic!(), - recv(r5) -> v => assert_eq!(v, Ok(5)), - } -} - -#[test] -fn disconnected() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - drop(s1); - thread::sleep(ms(500)); - s2.send(5).unwrap(); - }); - - select! { - recv(r1) -> v => assert!(v.is_err()), - recv(r2) -> _ => panic!(), - default(ms(1000)) => panic!(), - } - - r2.recv().unwrap(); - }) - .unwrap(); - - select! { - recv(r1) -> v => assert!(v.is_err()), - recv(r2) -> _ => panic!(), - default(ms(1000)) => panic!(), - } - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - drop(s2); - }); - - select! { - recv(r2) -> v => assert!(v.is_err()), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn default() { - let (s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> _ => panic!(), - default => {} - } - - drop(s1); - - select! { - recv(r1) -> v => assert!(v.is_err()), - recv(r2) -> _ => panic!(), - default => panic!(), - } - - s2.send(2).unwrap(); - - select! { - recv(r2) -> v => assert_eq!(v, Ok(2)), - default => panic!(), - } - - select! { - recv(r2) -> _ => panic!(), - default => {}, - } - - select! { - default => {}, - } -} - -#[test] -fn timeout() { - let (_s1, r1) = unbounded::(); - let (s2, r2) = unbounded::(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(1500)); - s2.send(2).unwrap(); - }); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> _ => panic!(), - default(ms(1000)) => {}, - } - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> v => assert_eq!(v, Ok(2)), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - let (s, r) = unbounded::(); - - scope.spawn(move |_| { - thread::sleep(ms(500)); - drop(s); - }); - - select! { - default(ms(1000)) => { - select! 
{ - recv(r) -> v => assert!(v.is_err()), - default => panic!(), - } - } - } - }) - .unwrap(); -} - -#[test] -fn default_when_disconnected() { - let (_, r) = unbounded::(); - - select! { - recv(r) -> res => assert!(res.is_err()), - default => panic!(), - } - - let (_, r) = unbounded::(); - - select! { - recv(r) -> res => assert!(res.is_err()), - default(ms(1000)) => panic!(), - } - - let (s, _) = bounded::(0); - - select! { - send(s, 0) -> res => assert!(res.is_err()), - default => panic!(), - } - - let (s, _) = bounded::(0); - - select! { - send(s, 0) -> res => assert!(res.is_err()), - default(ms(1000)) => panic!(), - } -} - -#[test] -fn default_only() { - let start = Instant::now(); - select! { - default => {} - } - let now = Instant::now(); - assert!(now - start <= ms(50)); - - let start = Instant::now(); - select! { - default(ms(500)) => {} - } - let now = Instant::now(); - assert!(now - start >= ms(450)); - assert!(now - start <= ms(550)); -} - -#[test] -fn unblocks() { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s2.send(2).unwrap(); - }); - - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> v => assert_eq!(v, Ok(2)), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - assert_eq!(r1.recv().unwrap(), 1); - }); - - select! { - send(s1, 1) -> _ => {}, - send(s2, 2) -> _ => panic!(), - default(ms(1000)) => panic!(), - } - }) - .unwrap(); -} - -#[test] -fn both_ready() { - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(500)); - s1.send(1).unwrap(); - assert_eq!(r2.recv().unwrap(), 2); - }); - - for _ in 0..2 { - select! 
{ - recv(r1) -> v => assert_eq!(v, Ok(1)), - send(s2, 2) -> _ => {}, - } - } - }) - .unwrap(); -} - -#[test] -fn loop_try() { - const RUNS: usize = 20; - - for _ in 0..RUNS { - let (s1, r1) = bounded::(0); - let (s2, r2) = bounded::(0); - let (s_end, r_end) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| loop { - select! { - send(s1, 1) -> _ => break, - default => {} - } - - select! { - recv(r_end) -> _ => break, - default => {} - } - }); - - scope.spawn(|_| loop { - if let Ok(x) = r2.try_recv() { - assert_eq!(x, 2); - break; - } - - select! { - recv(r_end) -> _ => break, - default => {} - } - }); - - scope.spawn(|_| { - thread::sleep(ms(500)); - - select! { - recv(r1) -> v => assert_eq!(v, Ok(1)), - send(s2, 2) -> _ => {}, - default(ms(500)) => panic!(), - } - - drop(s_end); - }); - }) - .unwrap(); - } -} - -#[test] -fn cloning1() { - scope(|scope| { - let (s1, r1) = unbounded::(); - let (_s2, r2) = unbounded::(); - let (s3, r3) = unbounded::<()>(); - - scope.spawn(move |_| { - r3.recv().unwrap(); - drop(s1.clone()); - assert_eq!(r3.try_recv(), Err(TryRecvError::Empty)); - s1.send(1).unwrap(); - r3.recv().unwrap(); - }); - - s3.send(()).unwrap(); - - select! { - recv(r1) -> _ => {}, - recv(r2) -> _ => {}, - } - - s3.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn cloning2() { - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (_s3, _r3) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r1) -> _ => panic!(), - recv(r2) -> _ => {}, - } - }); - - thread::sleep(ms(500)); - drop(s1.clone()); - s2.send(()).unwrap(); - }) - .unwrap(); -} - -#[test] -fn preflight1() { - let (s, r) = unbounded(); - s.send(()).unwrap(); - - select! { - recv(r) -> _ => {} - } -} - -#[test] -fn preflight2() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - - select! 
{ - recv(r) -> v => assert!(v.is_ok()), - } - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); -} - -#[test] -fn preflight3() { - let (s, r) = unbounded(); - drop(s.clone()); - s.send(()).unwrap(); - drop(s); - r.recv().unwrap(); - - select! { - recv(r) -> v => assert!(v.is_err()) - } -} - -#[test] -fn duplicate_operations() { - let (s, r) = unbounded::(); - let mut hit = [false; 4]; - - while hit.iter().any(|hit| !hit) { - select! { - recv(r) -> _ => hit[0] = true, - recv(r) -> _ => hit[1] = true, - send(s, 0) -> _ => hit[2] = true, - send(s, 0) -> _ => hit[3] = true, - } - } -} - -#[test] -fn nesting() { - let (s, r) = unbounded::(); - - select! { - send(s, 0) -> _ => { - select! { - recv(r) -> v => { - assert_eq!(v, Ok(0)); - select! { - send(s, 1) -> _ => { - select! { - recv(r) -> v => { - assert_eq!(v, Ok(1)); - } - } - } - } - } - } - } - } -} - -#[test] -#[should_panic(expected = "send panicked")] -fn panic_sender() { - fn get() -> Sender { - panic!("send panicked") - } - - #[allow(unreachable_code, clippy::diverging_sub_expression)] - { - select! { - send(get(), panic!()) -> _ => {} - } - } -} - -#[test] -#[should_panic(expected = "recv panicked")] -fn panic_receiver() { - fn get() -> Receiver { - panic!("recv panicked") - } - - select! { - recv(get()) -> _ => {} - } -} - -#[test] -fn stress_recv() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded(); - let (s2, r2) = bounded(5); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - r3.recv().unwrap(); - - s2.send(i).unwrap(); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - select! 
{ - recv(r1) -> v => assert_eq!(v, Ok(i)), - recv(r2) -> v => assert_eq!(v, Ok(i)), - } - - s3.send(()).unwrap(); - } - } - }) - .unwrap(); -} - -#[test] -fn stress_send() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r1.recv().unwrap(), i); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - select! { - send(s1, i) -> _ => {}, - send(s2, i) -> _ => {}, - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_mixed() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded(0); - let (s2, r2) = bounded(0); - let (s3, r3) = bounded(100); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - s1.send(i).unwrap(); - assert_eq!(r2.recv().unwrap(), i); - r3.recv().unwrap(); - } - }); - - for i in 0..COUNT { - for _ in 0..2 { - select! { - recv(r1) -> v => assert_eq!(v, Ok(i)), - send(s2, i) -> _ => {}, - } - } - s3.send(()).unwrap(); - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 20; - - let (s, r) = bounded(2); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - select! { - send(s, i) -> _ => break, - default(ms(100)) => {} - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(500)); - } - - loop { - select! { - recv(r) -> v => { - assert_eq!(v, Ok(i)); - break; - } - default(ms(100)) => {} - } - } - } - }); - }) - .unwrap(); -} - -#[test] -fn send_recv_same_channel() { - let (s, r) = bounded::(0); - select! { - send(s, 0) -> _ => panic!(), - recv(r) -> _ => panic!(), - default(ms(500)) => {} - } - - let (s, r) = unbounded::(); - select! 
{ - send(s, 0) -> _ => {}, - recv(r) -> _ => panic!(), - default(ms(500)) => panic!(), - } -} - -#[test] -fn matching() { - const THREADS: usize = 44; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - select! { - recv(r) -> v => assert_ne!(v.unwrap(), i), - send(s, i) -> _ => {}, - } - }); - } - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn matching_with_leftover() { - const THREADS: usize = 55; - - let (s, r) = &bounded::(0); - - scope(|scope| { - for i in 0..THREADS { - scope.spawn(move |_| { - select! { - recv(r) -> v => assert_ne!(v.unwrap(), i), - send(s, i) -> _ => {}, - } - }); - } - s.send(!0).unwrap(); - }) - .unwrap(); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn channel_through_channel() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - type T = Box; - - for cap in 0..3 { - let (s, r) = bounded::(cap); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(cap); - let new_r: T = Box::new(Some(new_r)); - - select! { - send(s, new_r) -> _ => {} - } - - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = select! { - recv(r) -> msg => { - msg.unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - } - } - }); - }) - .unwrap(); - } -} - -#[test] -fn linearizable_default() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - select! 
{ - recv(r1) -> _ => {} - recv(r2) -> _ => {} - default => unreachable!() - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - } -} - -#[test] -fn linearizable_timeout() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - for step in 0..2 { - let (start_s, start_r) = bounded::<()>(0); - let (end_s, end_r) = bounded::<()>(0); - - let ((s1, r1), (s2, r2)) = if step == 0 { - (bounded::(1), bounded::(1)) - } else { - (unbounded::(), unbounded::()) - }; - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..COUNT { - start_s.send(()).unwrap(); - - s1.send(1).unwrap(); - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - default(ms(0)) => unreachable!() - } - - end_s.send(()).unwrap(); - let _ = r2.try_recv(); - } - }); - - for _ in 0..COUNT { - start_r.recv().unwrap(); - - s2.send(1).unwrap(); - let _ = r1.try_recv(); - - end_r.recv().unwrap(); - } - }) - .unwrap(); - } -} - -#[test] -fn fairness1() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 4]; - for _ in 0..COUNT { - select! 
{ - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(after(ms(0))) -> _ => hits[2] += 1, - recv(tick(ms(0))) -> _ => hits[3] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); -} - -#[cfg_attr(crossbeam_sanitize, ignore)] // TODO: flaky: https://github.com/crossbeam-rs/crossbeam/issues/1094 -#[test] -fn fairness2() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = bounded::<()>(1); - let (s3, r3) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - let (hole, _r) = bounded(0); - - for _ in 0..COUNT { - let s1 = if s1.is_empty() { &s1 } else { &hole }; - let s2 = if s2.is_empty() { &s2 } else { &hole }; - - select! { - send(s1, ()) -> res => assert!(res.is_ok()), - send(s2, ()) -> res => assert!(res.is_ok()), - send(s3, ()) -> res => assert!(res.is_ok()), - } - } - }); - - let hits = vec![Cell::new(0usize); 3]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0].set(hits[0].get() + 1), - recv(r2) -> _ => hits[1].set(hits[1].get() + 1), - recv(r3) -> _ => hits[2].set(hits[2].get() + 1), - } - } - assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 50)); - }) - .unwrap(); -} - -#[test] -fn fairness_recv() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(COUNT); - let (s2, r2) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - - let mut hits = [0usize; 2]; - while hits[0] + hits[1] < COUNT { - select! 
{ - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / 4)); -} - -#[test] -fn fairness_send() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, _r1) = bounded::<()>(COUNT); - let (s2, _r2) = unbounded::<()>(); - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - send(s1, ()) -> _ => hits[0] += 1, - send(s2, ()) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / 4)); -} - -#[test] -fn unfairness() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (s3, r3) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - s3.send(()).unwrap(); - - let mut hits = [0usize; 3]; - for _ in 0..COUNT { - select_biased! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(r3) -> _ => hits[2] += 1, - } - } - assert_eq!(hits, [COUNT, 0, 0]); - - for _ in 0..COUNT { - select_biased! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(r3) -> _ => hits[2] += 1, - } - } - assert_eq!(hits, [COUNT, COUNT, 0]); -} - -#[test] -fn unfairness_timeout() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (s3, r3) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - s3.send(()).unwrap(); - - let mut hits = [0usize; 3]; - for _ in 0..COUNT { - select_biased! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(r3) -> _ => hits[2] += 1, - default(ms(1000)) => unreachable!(), - } - } - assert_eq!(hits, [COUNT, 0, 0]); - - for _ in 0..COUNT { - select_biased! 
{ - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(r3) -> _ => hits[2] += 1, - default(ms(1000)) => unreachable!(), - } - } - assert_eq!(hits, [COUNT, COUNT, 0]); -} - -#[test] -fn unfairness_try() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = unbounded::<()>(); - let (s2, r2) = unbounded::<()>(); - let (s3, r3) = unbounded::<()>(); - - for _ in 0..COUNT { - s1.send(()).unwrap(); - s2.send(()).unwrap(); - } - s3.send(()).unwrap(); - - let mut hits = [0usize; 3]; - for _ in 0..COUNT { - select_biased! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(r3) -> _ => hits[2] += 1, - default() => unreachable!(), - } - } - assert_eq!(hits, [COUNT, 0, 0]); - - for _ in 0..COUNT { - select_biased! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - recv(r3) -> _ => hits[2] += 1, - default() => unreachable!(), - } - } - assert_eq!(hits, [COUNT, COUNT, 0]); -} - -#[allow(clippy::or_fun_call, clippy::unnecessary_literal_unwrap)] // This is intentional. -#[test] -fn references() { - let (s, r) = unbounded::(); - select! { - send(s, 0) -> _ => {} - recv(r) -> _ => {} - } - select! { - send(&&&&s, 0) -> _ => {} - recv(&&&&r) -> _ => {} - } - select! { - recv(Some(&r).unwrap_or(&never())) -> _ => {}, - default => {} - } - select! { - recv(Some(r).unwrap_or(never())) -> _ => {}, - default => {} - } -} - -#[allow(clippy::never_loop)] // This is intentional. -#[test] -fn case_blocks() { - let (s, r) = unbounded::(); - - select! { - recv(r) -> _ => 3.0, - recv(r) -> _ => loop { - unreachable!() - }, - recv(r) -> _ => match 7 + 3 { - _ => unreachable!() - }, - default => 7. - }; - - select! { - recv(r) -> msg => if msg.is_ok() { - unreachable!() - }, - default => () - } - - drop(s); -} - -#[allow(clippy::redundant_closure_call)] // This is intentional. -#[test] -fn move_handles() { - let (s, r) = unbounded::(); - select! 
{ - recv((move || r)()) -> _ => {} - send((move || s)(), 0) -> _ => {} - } -} - -#[test] -fn infer_types() { - let (s, r) = unbounded(); - select! { - recv(r) -> _ => {} - default => {} - } - s.send(()).unwrap(); - - let (s, r) = unbounded(); - select! { - send(s, ()) -> _ => {} - } - r.recv().unwrap(); -} - -#[test] -fn default_syntax() { - let (s, r) = bounded::(0); - - select! { - recv(r) -> _ => panic!(), - default => {} - } - select! { - send(s, 0) -> _ => panic!(), - default() => {} - } - select! { - default => {} - } - select! { - default() => {} - } -} - -#[test] -fn same_variable_name() { - let (_, r) = unbounded::(); - select! { - recv(r) -> r => assert!(r.is_err()), - } -} - -#[test] -fn handles_on_heap() { - let (s, r) = unbounded::(); - let (s, r) = (Box::new(s), Box::new(r)); - - select! { - send(*s, 0) -> _ => {} - recv(*r) -> _ => {} - default => {} - } - - drop(s); - drop(r); -} - -#[test] -fn once_blocks() { - let (s, r) = unbounded::(); - - let once = Box::new(()); - select! { - send(s, 0) -> _ => drop(once), - } - - let once = Box::new(()); - select! { - recv(r) -> _ => drop(once), - } - - let once1 = Box::new(()); - let once2 = Box::new(()); - select! { - send(s, 0) -> _ => drop(once1), - default => drop(once2), - } - - let once1 = Box::new(()); - let once2 = Box::new(()); - select! { - recv(r) -> _ => drop(once1), - default => drop(once2), - } - - let once1 = Box::new(()); - let once2 = Box::new(()); - select! { - recv(r) -> _ => drop(once1), - send(s, 0) -> _ => drop(once2), - } -} - -#[test] -fn once_receiver() { - let (_, r) = unbounded::(); - - let once = Box::new(()); - let get = move || { - drop(once); - r - }; - - select! { - recv(get()) -> _ => {} - } -} - -#[test] -fn once_sender() { - let (s, _) = unbounded::(); - - let once = Box::new(()); - let get = move || { - drop(once); - s - }; - - select! { - send(get(), 5) -> _ => {} - } -} - -#[test] -fn parse_nesting() { - let (_, r) = unbounded::(); - - select! 
{ - recv(r) -> _ => {} - recv(r) -> _ => { - select! { - recv(r) -> _ => {} - recv(r) -> _ => { - select! { - recv(r) -> _ => {} - recv(r) -> _ => { - select! { - default => {} - } - } - } - } - } - } - } -} - -#[test] -fn evaluate() { - let (s, r) = unbounded::(); - - let v = select! { - recv(r) -> _ => "foo".into(), - send(s, 0) -> _ => "bar".to_owned(), - default => "baz".to_string(), - }; - assert_eq!(v, "bar"); - - let v = select! { - recv(r) -> _ => "foo".into(), - default => "baz".to_string(), - }; - assert_eq!(v, "foo"); - - let v = select! { - recv(r) -> _ => "foo".into(), - default => "baz".to_string(), - }; - assert_eq!(v, "baz"); -} - -#[test] -fn deref() { - use crossbeam_channel as cc; - - struct Sender(cc::Sender); - struct Receiver(cc::Receiver); - - impl Deref for Receiver { - type Target = cc::Receiver; - - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - impl Deref for Sender { - type Target = cc::Sender; - - fn deref(&self) -> &Self::Target { - &self.0 - } - } - - let (s, r) = bounded::(0); - let (s, r) = (Sender(s), Receiver(r)); - - select! { - send(s, 0) -> _ => panic!(), - recv(r) -> _ => panic!(), - default => {} - } -} - -#[test] -fn result_types() { - let (s, _) = bounded::(0); - let (_, r) = bounded::(0); - - select! { - recv(r) -> res => { let _: Result = res; }, - } - select! { - recv(r) -> res => { let _: Result = res; }, - default => {} - } - select! { - recv(r) -> res => { let _: Result = res; }, - default(ms(0)) => {} - } - - select! { - send(s, 0) -> res => { let _: Result<(), SendError> = res; }, - } - select! { - send(s, 0) -> res => { let _: Result<(), SendError> = res; }, - default => {} - } - select! { - send(s, 0) -> res => { let _: Result<(), SendError> = res; }, - default(ms(0)) => {} - } - - select! 
{ - send(s, 0) -> res => { let _: Result<(), SendError> = res; }, - recv(r) -> res => { let _: Result = res; }, - } -} - -#[test] -fn try_recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r) -> _ => panic!(), - default => {} - } - thread::sleep(ms(1500)); - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - default => panic!(), - } - thread::sleep(ms(500)); - select! { - recv(r) -> v => assert_eq!(v, Err(RecvError)), - default => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - select! { - send(s, 7) -> res => res.unwrap(), - } - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - } - thread::sleep(ms(1000)); - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - thread::sleep(ms(1000)); - select! { - recv(r) -> v => assert_eq!(v, Ok(9)), - } - select! { - recv(r) -> v => assert_eq!(v, Err(RecvError)), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - send(s, 7) -> res => res.unwrap(), - } - select! { - send(s, 8) -> res => res.unwrap(), - } - select! { - send(s, 9) -> res => res.unwrap(), - } - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - recv(r) -> _ => panic!(), - default(ms(1000)) => {} - } - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - default(ms(1000)) => panic!(), - } - select! { - recv(r) -> v => assert_eq!(v, Err(RecvError)), - default(ms(1000)) => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - send(s, 7) -> res => res.unwrap(), - } - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - send(s, 7) -> _ => panic!(), - default => {} - } - thread::sleep(ms(1500)); - select! 
{ - send(s, 8) -> res => res.unwrap(), - default => panic!(), - } - thread::sleep(ms(500)); - select! { - send(s, 8) -> res => assert_eq!(res, Err(SendError(8))), - default => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - }); - }) - .unwrap(); -} - -#[test] -fn send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - send(s, 7) -> res => res.unwrap(), - } - thread::sleep(ms(1000)); - select! { - send(s, 8) -> res => res.unwrap(), - } - thread::sleep(ms(1000)); - select! { - send(s, 9) -> res => res.unwrap(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - recv(r) -> v => assert_eq!(v, Ok(7)), - } - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - select! { - recv(r) -> v => assert_eq!(v, Ok(9)), - } - }); - }) - .unwrap(); -} - -#[test] -fn send_timeout() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - send(s, 7) -> _ => panic!(), - default(ms(1000)) => {} - } - select! { - send(s, 8) -> res => res.unwrap(), - default(ms(1000)) => panic!(), - } - select! { - send(s, 9) -> res => assert_eq!(res, Err(SendError(9))), - default(ms(1000)) => panic!(), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - select! { - recv(r) -> v => assert_eq!(v, Ok(8)), - } - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_sender() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - select! { - send(s, ()) -> res => assert_eq!(res, Err(SendError(()))), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(r); - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(move |_| { - select! 
{ - recv(r) -> res => assert_eq!(res, Err(RecvError)), - } - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} - -#[test] -fn trailing_comma() { - let (s, r) = unbounded::(); - - select! { - send(s, 1,) -> _ => {}, - recv(r,) -> _ => {}, - default(ms(1000),) => {}, - } -} diff --git a/crossbeam-channel/tests/thread_locals.rs b/crossbeam-channel/tests/thread_locals.rs deleted file mode 100644 index 4639833aa..000000000 --- a/crossbeam-channel/tests/thread_locals.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Tests that make sure accessing thread-locals while exiting the thread doesn't cause panics. - -#![cfg(not(miri))] // Miri detects that this test is buggy: the destructor of `FOO` uses `std::thread::current()`! - -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{select, unbounded}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -#[cfg_attr(target_os = "macos", ignore = "TLS is destroyed too early on macOS")] -fn use_while_exiting() { - struct Foo; - - impl Drop for Foo { - fn drop(&mut self) { - // A blocking operation after the thread-locals have been dropped. This will attempt to - // use the thread-locals and must not panic. - let (_s, r) = unbounded::<()>(); - select! { - recv(r) -> _ => {} - default(ms(100)) => {} - } - } - } - - thread_local! { - static FOO: Foo = const { Foo }; - } - - let (s, r) = unbounded::<()>(); - - scope(|scope| { - scope.spawn(|_| { - // First initialize `FOO`, then the thread-locals related to crossbeam-channel. - FOO.with(|_| ()); - r.recv().unwrap(); - // At thread exit, thread-locals related to crossbeam-channel get dropped first and - // `FOO` is dropped last. 
- }); - - scope.spawn(|_| { - thread::sleep(ms(100)); - s.send(()).unwrap(); - }); - }) - .unwrap(); -} diff --git a/crossbeam-channel/tests/tick.rs b/crossbeam-channel/tests/tick.rs deleted file mode 100644 index 23bbb1f18..000000000 --- a/crossbeam-channel/tests/tick.rs +++ /dev/null @@ -1,352 +0,0 @@ -//! Tests for the tick channel flavor. - -#![cfg(not(miri))] // TODO: many assertions failed due to Miri is slow - -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_channel::{after, select, tick, Select, TryRecvError}; -use crossbeam_utils::thread::scope; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn fire() { - let start = Instant::now(); - let r = tick(ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(100)); - - let fired = r.try_recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired >= ms(50)); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - - select! { - recv(r) -> _ => panic!(), - default => {} - } - - select! 
{ - recv(r) -> _ => {} - recv(tick(ms(200))) -> _ => panic!(), - } -} - -#[test] -fn intervals() { - let start = Instant::now(); - let r = tick(ms(50)); - - let t1 = r.recv().unwrap(); - assert!(start + ms(50) <= t1); - assert!(start + ms(100) > t1); - - thread::sleep(ms(300)); - let t2 = r.try_recv().unwrap(); - assert!(start + ms(100) <= t2); - assert!(start + ms(150) > t2); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - let t3 = r.recv().unwrap(); - assert!(start + ms(400) <= t3); - assert!(start + ms(450) > t3); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn capacity() { - const COUNT: usize = 10; - - for i in 0..COUNT { - let r = tick(ms(i as u64)); - assert_eq!(r.capacity(), Some(1)); - } -} - -#[test] -fn len_empty_full() { - let r = tick(ms(50)); - - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(!r.is_full()); - - thread::sleep(ms(100)); - - assert_eq!(r.len(), 1); - assert!(!r.is_empty()); - assert!(r.is_full()); - - r.try_recv().unwrap(); - - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(!r.is_full()); -} - -#[test] -fn try_recv() { - let r = tick(ms(200)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(100)); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_ok()); - assert!(r.try_recv().is_err()); - - thread::sleep(ms(200)); - assert!(r.try_recv().is_ok()); - assert!(r.try_recv().is_err()); -} - -#[test] -fn recv() { - let start = Instant::now(); - let r = tick(ms(50)); - - let fired = r.recv().unwrap(); - assert!(start < fired); - assert!(fired - start >= ms(50)); - - let now = Instant::now(); - assert!(fired < now); - assert!(now - fired < fired - start); - - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[cfg(not(crossbeam_sanitize))] // TODO: assertions failed due to tsan is slow -#[test] -fn recv_timeout() { - let start = Instant::now(); - let r = tick(ms(200)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = 
Instant::now(); - assert!(now - start >= ms(100)); - assert!(now - start <= ms(150)); - - let fired = r.recv_timeout(ms(200)).unwrap(); - assert!(fired - start >= ms(200)); - assert!(fired - start <= ms(250)); - - assert!(r.recv_timeout(ms(100)).is_err()); - let now = Instant::now(); - assert!(now - start >= ms(300)); - assert!(now - start <= ms(350)); - - let fired = r.recv_timeout(ms(200)).unwrap(); - assert!(fired - start >= ms(400)); - assert!(fired - start <= ms(450)); -} - -#[test] -fn recv_two() { - let r1 = tick(ms(50)); - let r2 = tick(ms(50)); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..10 { - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - } - }); - scope.spawn(|_| { - for _ in 0..10 { - select! { - recv(r1) -> _ => {} - recv(r2) -> _ => {} - } - } - }); - }) - .unwrap(); -} - -#[test] -fn recv_race() { - select! { - recv(tick(ms(50))) -> _ => {} - recv(tick(ms(100))) -> _ => panic!(), - } - - select! { - recv(tick(ms(100))) -> _ => panic!(), - recv(tick(ms(50))) -> _ => {} - } -} - -#[test] -fn stress_default() { - const COUNT: usize = 10; - - for _ in 0..COUNT { - select! { - recv(tick(ms(0))) -> _ => {} - default => panic!(), - } - } - - for _ in 0..COUNT { - select! 
{ - recv(tick(ms(100))) -> _ => panic!(), - default => {} - } - } -} - -#[test] -fn select() { - const THREADS: usize = 4; - - let hits = AtomicUsize::new(0); - let r1 = tick(ms(200)); - let r2 = tick(ms(300)); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let timeout = after(ms(1100)); - loop { - let mut sel = Select::new(); - let oper1 = sel.recv(&r1); - let oper2 = sel.recv(&r2); - let oper3 = sel.recv(&timeout); - let oper = sel.select(); - match oper.index() { - i if i == oper1 => { - oper.recv(&r1).unwrap(); - hits.fetch_add(1, Ordering::SeqCst); - } - i if i == oper2 => { - oper.recv(&r2).unwrap(); - hits.fetch_add(1, Ordering::SeqCst); - } - i if i == oper3 => { - oper.recv(&timeout).unwrap(); - break; - } - _ => unreachable!(), - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), 8); -} - -#[cfg(not(crossbeam_sanitize))] // TODO: assertions failed due to tsan is slow -#[test] -fn ready() { - const THREADS: usize = 4; - - let hits = AtomicUsize::new(0); - let r1 = tick(ms(200)); - let r2 = tick(ms(300)); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let timeout = after(ms(1100)); - 'outer: loop { - let mut sel = Select::new(); - sel.recv(&r1); - sel.recv(&r2); - sel.recv(&timeout); - loop { - match sel.ready() { - 0 => { - if r1.try_recv().is_ok() { - hits.fetch_add(1, Ordering::SeqCst); - break; - } - } - 1 => { - if r2.try_recv().is_ok() { - hits.fetch_add(1, Ordering::SeqCst); - break; - } - } - 2 => { - if timeout.try_recv().is_ok() { - break 'outer; - } - } - _ => unreachable!(), - } - } - } - }); - } - }) - .unwrap(); - - assert_eq!(hits.load(Ordering::SeqCst), 8); -} - -#[test] -fn fairness() { - const COUNT: usize = 30; - - for &dur in &[0, 1] { - let mut hits = [0usize; 2]; - - for _ in 0..COUNT { - let r1 = tick(ms(dur)); - let r2 = tick(ms(dur)); - - for _ in 0..COUNT { - select! 
{ - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} - -#[test] -fn fairness_duplicates() { - const COUNT: usize = 30; - - for &dur in &[0, 1] { - let mut hits = [0usize; 5]; - - for _ in 0..COUNT { - let r = tick(ms(dur)); - - for _ in 0..COUNT { - select! { - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - } - - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - } -} diff --git a/crossbeam-channel/tests/zero.rs b/crossbeam-channel/tests/zero.rs deleted file mode 100644 index 74c9a3e10..000000000 --- a/crossbeam-channel/tests/zero.rs +++ /dev/null @@ -1,587 +0,0 @@ -//! Tests for the zero channel flavor. - -use std::any::Any; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering; -use std::thread; -use std::time::Duration; - -use crossbeam_channel::{bounded, select, Receiver}; -use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError}; -use crossbeam_channel::{SendError, SendTimeoutError, TrySendError}; -use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -fn ms(ms: u64) -> Duration { - Duration::from_millis(ms) -} - -#[test] -fn smoke() { - let (s, r) = bounded(0); - assert_eq!(s.try_send(7), Err(TrySendError::Full(7))); - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); -} - -#[test] -fn capacity() { - let (s, r) = bounded::<()>(0); - assert_eq!(s.capacity(), Some(0)); - assert_eq!(r.capacity(), Some(0)); -} - -#[test] -fn len_empty_full() { - let (s, r) = bounded(0); - - assert_eq!(s.len(), 0); - assert!(s.is_empty()); - assert!(s.is_full()); - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(r.is_full()); - - scope(|scope| { - scope.spawn(|_| s.send(0).unwrap()); - scope.spawn(|_| r.recv().unwrap()); - }) - .unwrap(); - - assert_eq!(s.len(), 0); - assert!(s.is_empty()); - 
assert!(s.is_full()); - assert_eq!(r.len(), 0); - assert!(r.is_empty()); - assert!(r.is_full()); -} - -#[test] -fn try_recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.try_recv(), Err(TryRecvError::Empty)); - thread::sleep(ms(1500)); - assert_eq!(r.try_recv(), Ok(7)); - thread::sleep(ms(500)); - assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Ok(7)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(9)); - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - s.send(8).unwrap(); - s.send(9).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn recv_timeout() { - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout)); - assert_eq!(r.recv_timeout(ms(1000)), Ok(7)); - assert_eq!( - r.recv_timeout(ms(1000)), - Err(RecvTimeoutError::Disconnected) - ); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - s.send(7).unwrap(); - }); - }) - .unwrap(); -} - -#[test] -fn try_send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.try_send(7), Err(TrySendError::Full(7))); - thread::sleep(ms(1500)); - assert_eq!(s.try_send(8), Ok(())); - thread::sleep(ms(500)); - assert_eq!(s.try_send(9), Err(TrySendError::Disconnected(9))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - assert_eq!(r.recv(), Ok(8)); - }); - }) - .unwrap(); -} - -#[test] -fn send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - s.send(7).unwrap(); - thread::sleep(ms(1000)); - s.send(8).unwrap(); - thread::sleep(ms(1000)); - 
s.send(9).unwrap(); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - assert_eq!(r.recv(), Ok(7)); - assert_eq!(r.recv(), Ok(8)); - assert_eq!(r.recv(), Ok(9)); - }); - }) - .unwrap(); -} - -#[test] -fn send_timeout() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!( - s.send_timeout(7, ms(1000)), - Err(SendTimeoutError::Timeout(7)) - ); - assert_eq!(s.send_timeout(8, ms(1000)), Ok(())); - assert_eq!( - s.send_timeout(9, ms(1000)), - Err(SendTimeoutError::Disconnected(9)) - ); - }); - scope.spawn(move |_| { - thread::sleep(ms(1500)); - assert_eq!(r.recv(), Ok(8)); - }); - }) - .unwrap(); -} - -#[test] -fn len() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - - let (s, r) = bounded(0); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - assert_eq!(r.len(), 0); - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - assert_eq!(s.len(), 0); - } - }); - }) - .unwrap(); - - assert_eq!(s.len(), 0); - assert_eq!(r.len(), 0); -} - -#[test] -fn disconnect_wakes_sender() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(s.send(()), Err(SendError(()))); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(r); - }); - }) - .unwrap(); -} - -#[test] -fn disconnect_wakes_receiver() { - let (s, r) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(move |_| { - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - thread::sleep(ms(1000)); - drop(s); - }); - }) - .unwrap(); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - for i in 0..COUNT { - assert_eq!(r.recv(), Ok(i)); - } - assert_eq!(r.recv(), Err(RecvError)); - }); - scope.spawn(move |_| { - for i in 
0..COUNT { - s.send(i).unwrap(); - } - }); - }) - .unwrap(); -} - -#[test] -fn mpmc() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let (s, r) = bounded::(0); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = r.recv().unwrap(); - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - s.send(i).unwrap(); - } - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn stress_oneshot() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - for _ in 0..COUNT { - let (s, r) = bounded(1); - - scope(|scope| { - scope.spawn(|_| r.recv().unwrap()); - scope.spawn(|_| s.send(0).unwrap()); - }) - .unwrap(); - } -} - -#[test] -fn stress_iter() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - let (request_s, request_r) = bounded(0); - let (response_s, response_r) = bounded(0); - - scope(|scope| { - scope.spawn(move |_| { - let mut count = 0; - loop { - for x in response_r.try_iter() { - count += x; - if count == COUNT { - return; - } - } - let _ = request_s.try_send(()); - } - }); - - for _ in request_r.iter() { - if response_s.send(1).is_err() { - break; - } - } - }) - .unwrap(); -} - -#[test] -fn stress_timeout_two_threads() { - const COUNT: usize = 100; - - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(()) = s.send_timeout(i, ms(10)) { - break; - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if i % 2 == 0 { - thread::sleep(ms(50)); - } - loop { - if let Ok(x) = r.recv_timeout(ms(10)) { - assert_eq!(x, i); - break; - } - } - } - }); - }) - .unwrap(); -} - 
-#[test] -fn drops() { - #[cfg(miri)] - const RUNS: usize = 20; - #[cfg(not(miri))] - const RUNS: usize = 100; - #[cfg(miri)] - const STEPS: usize = 100; - #[cfg(not(miri))] - const STEPS: usize = 10_000; - - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - let mut rng = thread_rng(); - - for _ in 0..RUNS { - let steps = rng.gen_range(0..STEPS); - - DROPS.store(0, Ordering::SeqCst); - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - r.recv().unwrap(); - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - s.send(DropCounter).unwrap(); - } - }); - }) - .unwrap(); - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(s); - drop(r); - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - } -} - -#[test] -fn fairness() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s1, r1) = bounded::<()>(0); - let (s2, r2) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - recv(r1) -> _ => hits[0] += 1, - recv(r2) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }); - - let mut hits = [0usize; 2]; - for _ in 0..COUNT { - select! { - send(s1, ()) -> _ => hits[0] += 1, - send(s2, ()) -> _ => hits[1] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }) - .unwrap(); -} - -#[test] -fn fairness_duplicates() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 10_000; - - let (s, r) = bounded::<()>(0); - - scope(|scope| { - scope.spawn(|_| { - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! 
{ - recv(r) -> _ => hits[0] += 1, - recv(r) -> _ => hits[1] += 1, - recv(r) -> _ => hits[2] += 1, - recv(r) -> _ => hits[3] += 1, - recv(r) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }); - - let mut hits = [0usize; 5]; - for _ in 0..COUNT { - select! { - send(s, ()) -> _ => hits[0] += 1, - send(s, ()) -> _ => hits[1] += 1, - send(s, ()) -> _ => hits[2] += 1, - send(s, ()) -> _ => hits[3] += 1, - send(s, ()) -> _ => hits[4] += 1, - } - } - assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2)); - }) - .unwrap(); -} - -#[test] -fn recv_in_send() { - let (s, r) = bounded(0); - - scope(|scope| { - scope.spawn(|_| { - thread::sleep(ms(100)); - r.recv() - }); - - scope.spawn(|_| { - thread::sleep(ms(500)); - s.send(()).unwrap(); - }); - - select! { - send(s, r.recv().unwrap()) -> _ => {} - } - }) - .unwrap(); -} - -#[test] -fn channel_through_channel() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 1000; - - type T = Box; - - let (s, r) = bounded::(0); - - scope(|scope| { - scope.spawn(move |_| { - let mut s = s; - - for _ in 0..COUNT { - let (new_s, new_r) = bounded(0); - let new_r: T = Box::new(Some(new_r)); - - s.send(new_r).unwrap(); - s = new_s; - } - }); - - scope.spawn(move |_| { - let mut r = r; - - for _ in 0..COUNT { - r = r - .recv() - .unwrap() - .downcast_mut::>>() - .unwrap() - .take() - .unwrap() - } - }); - }) - .unwrap(); -} diff --git a/crossbeam-deque/CHANGELOG.md b/crossbeam-deque/CHANGELOG.md deleted file mode 100644 index 5bc6e9f47..000000000 --- a/crossbeam-deque/CHANGELOG.md +++ /dev/null @@ -1,141 +0,0 @@ -# Version 0.8.6 - -- Fix stack overflow when pushing large value to `Injector`. (#1146, #1147, #1159) - -# Version 0.8.5 - -- Remove dependency on `cfg-if`. (#1072) - -# Version 0.8.4 - -- Bump the minimum supported Rust version to 1.61. (#1037) - -# Version 0.8.3 - -- Add `Stealer::{steal_batch_with_limit, steal_batch_with_limit_and_pop}` methods. 
(#903) -- Add `Injector::{steal_batch_with_limit, steal_batch_with_limit_and_pop}` methods. (#903) - -# Version 0.8.2 - -- Bump the minimum supported Rust version to 1.38. (#877) - -# Version 0.8.1 - -- Fix deque steal race condition. (#726) -- Add `Stealer::len` method. (#708) - -# Version 0.8.0 - -**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details. - -- Bump the minimum supported Rust version to 1.36. -- Add `Worker::len()` and `Injector::len()` methods. -- Add `std` (enabled by default) feature for forward compatibility. - -# Version 0.7.4 - -- Fix deque steal race condition. - -# Version 0.7.3 - -**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details. - -- Stop stealing from the same deque. (#448) -- Fix unsoundness issues by adopting `MaybeUninit`. (#458) - -# Version 0.7.2 - -**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details. - -- Bump `crossbeam-epoch` to `0.8`. -- Bump `crossbeam-utils` to `0.7`. - -# Version 0.7.1 - -**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details. - -- Bump the minimum required version of `crossbeam-utils`. - -# Version 0.7.0 - -**Note:** This release has been yanked. See [GHSA-pqqp-xmhj-wgcw](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-pqqp-xmhj-wgcw) for details. - -- Make `Worker::pop()` faster in the FIFO case. -- Replace `fifo()` nad `lifo()` with `Worker::new_fifo()` and `Worker::new_lifo()`. -- Add more batched steal methods. -- Introduce `Injector`, a MPMC queue. -- Rename `Steal::Data` to `Steal::Success`. -- Add `Steal::or_else()` and implement `FromIterator` for `Steal`. 
-- Add `#[must_use]` to `Steal`. - -# Version 0.6.3 - -- Bump `crossbeam-epoch` to `0.7`. - -# Version 0.6.2 - -- Update `crosbeam-utils` to `0.6`. - -# Version 0.6.1 - -- Change a few `Relaxed` orderings to `Release` in order to fix false positives by tsan. - -# Version 0.6.0 - -- Add `Stealer::steal_many` for batched stealing. -- Change the return type of `pop` to `Pop` so that spinning can be handled manually. - -# Version 0.5.2 - -- Update `crossbeam-utils` to `0.5.0`. - -# Version 0.5.1 - -- Minor optimizations. - -# Version 0.5.0 - -- Add two deque constructors : `fifo()` and `lifo()`. -- Update `rand` to `0.5.3`. -- Rename `Deque` to `Worker`. -- Return `Option` from `Stealer::steal`. -- Remove methods `Deque::len` and `Stealer::len`. -- Remove method `Deque::stealer`. -- Remove method `Deque::steal`. - -# Version 0.4.1 - -- Update `crossbeam-epoch` to `0.5.0`. - -# Version 0.4.0 - -- Update `crossbeam-epoch` to `0.4.2`. -- Update `crossbeam-utils` to `0.4.0`. -- Require minimum Rust version 1.25. - -# Version 0.3.1 - -- Add `Deque::capacity`. -- Add `Deque::min_capacity`. -- Add `Deque::shrink_to_fit`. -- Update `crossbeam-epoch` to `0.3.0`. -- Support Rust 1.20. -- Shrink the buffer in `Deque::push` if necessary. - -# Version 0.3.0 - -- Update `crossbeam-epoch` to `0.4.0`. -- Drop support for Rust 1.13. - -# Version 0.2.0 - -- Update `crossbeam-epoch` to `0.3.0`. -- Support Rust 1.13. - -# Version 0.1.1 - -- Update `crossbeam-epoch` to `0.2.0`. - -# Version 0.1.0 - -- First implementation of the Chase-Lev deque. 
diff --git a/crossbeam-deque/Cargo.toml b/crossbeam-deque/Cargo.toml deleted file mode 100644 index 3c6d44639..000000000 --- a/crossbeam-deque/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "crossbeam-deque" -# When publishing a new version: -# - Update CHANGELOG.md -# - Update README.md (when increasing major or minor version) -# - Run './tools/publish.sh crossbeam-deque ' -version = "0.8.6" -edition = "2021" -rust-version = "1.61" -license = "MIT OR Apache-2.0" -repository = "https://github.com/crossbeam-rs/crossbeam" -homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-deque" -description = "Concurrent work-stealing deque" -keywords = ["chase-lev", "lock-free", "scheduler", "scheduling"] -categories = ["algorithms", "concurrency", "data-structures"] - -[features] -default = ["std"] - -# Enable to use APIs that require `std`. -# This is enabled by default. -# -# NOTE: Disabling `std` feature is not supported yet. -std = ["crossbeam-epoch/std", "crossbeam-utils/std"] - -[dependencies] -crossbeam-epoch = { version = "0.9.17", path = "../crossbeam-epoch", default-features = false } -crossbeam-utils = { version = "0.8.18", path = "../crossbeam-utils", default-features = false } - -[dev-dependencies] -rand = "0.8" - -[lints] -workspace = true diff --git a/crossbeam-deque/LICENSE-APACHE b/crossbeam-deque/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/crossbeam-deque/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/crossbeam-deque/LICENSE-MIT b/crossbeam-deque/LICENSE-MIT deleted file mode 100644 index 068d491fd..000000000 --- a/crossbeam-deque/LICENSE-MIT +++ /dev/null @@ -1,27 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 The Crossbeam Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/crossbeam-deque/README.md b/crossbeam-deque/README.md deleted file mode 100644 index 4eae1447e..000000000 --- a/crossbeam-deque/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Crossbeam Deque - -[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( -https://github.com/crossbeam-rs/crossbeam/actions) -[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-deque#license) -[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)]( -https://crates.io/crates/crossbeam-deque) -[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)]( -https://docs.rs/crossbeam-deque) -[![Rust 1.61+](https://img.shields.io/badge/rust-1.61+-lightgray.svg)]( -https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) - -This crate provides work-stealing deques, which are primarily intended for -building task schedulers. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -crossbeam-deque = "0.8" -``` - -## Compatibility - -Crossbeam Deque supports stable Rust releases going back at least six months, -and every time the minimum supported Rust version is increased, a new minor -version is released. Currently, the minimum supported Rust version is 1.61. - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -#### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. 
diff --git a/crossbeam-deque/src/deque.rs b/crossbeam-deque/src/deque.rs deleted file mode 100644 index 4aa69a68f..000000000 --- a/crossbeam-deque/src/deque.rs +++ /dev/null @@ -1,2209 +0,0 @@ -use std::alloc::{alloc_zeroed, handle_alloc_error, Layout}; -use std::boxed::Box; -use std::cell::{Cell, UnsafeCell}; -use std::cmp; -use std::fmt; -use std::marker::PhantomData; -use std::mem::{self, MaybeUninit}; -use std::ptr; -use std::sync::atomic::{self, AtomicIsize, AtomicPtr, AtomicUsize, Ordering}; -use std::sync::Arc; - -use crossbeam_epoch::{self as epoch, Atomic, Owned}; -use crossbeam_utils::{Backoff, CachePadded}; - -// Minimum buffer capacity. -const MIN_CAP: usize = 64; -// Maximum number of tasks that can be stolen in `steal_batch()` and `steal_batch_and_pop()`. -const MAX_BATCH: usize = 32; -// If a buffer of at least this size is retired, thread-local garbage is flushed so that it gets -// deallocated as soon as possible. -const FLUSH_THRESHOLD_BYTES: usize = 1 << 10; - -/// A buffer that holds tasks in a worker queue. -/// -/// This is just a pointer to the buffer and its length - dropping an instance of this struct will -/// *not* deallocate the buffer. -struct Buffer { - /// Pointer to the allocated memory. - ptr: *mut T, - - /// Capacity of the buffer. Always a power of two. - cap: usize, -} - -unsafe impl Send for Buffer {} - -impl Buffer { - /// Allocates a new buffer with the specified capacity. - fn alloc(cap: usize) -> Self { - debug_assert_eq!(cap, cap.next_power_of_two()); - - let ptr = Box::into_raw( - (0..cap) - .map(|_| MaybeUninit::::uninit()) - .collect::>(), - ) - .cast::(); - - Self { ptr, cap } - } - - /// Deallocates the buffer. - unsafe fn dealloc(self) { - drop(unsafe { - Box::from_raw(ptr::slice_from_raw_parts_mut( - self.ptr.cast::>(), - self.cap, - )) - }); - } - - /// Returns a pointer to the task at the specified `index`. - unsafe fn at(&self, index: isize) -> *mut T { - // `self.cap` is always a power of two. 
- // We do all the loads at `MaybeUninit` because we might realize, after loading, that we - // don't actually have the right to access this memory. - unsafe { self.ptr.offset(index & (self.cap - 1) as isize) } - } - - /// Writes `task` into the specified `index`. - /// - /// This method might be concurrently called with another `read` at the same index, which is - /// technically speaking a data race and therefore UB. We should use an atomic store here, but - /// that would be more expensive and difficult to implement generically for all types `T`. - /// Hence, as a hack, we use a volatile write instead. - unsafe fn write(&self, index: isize, task: MaybeUninit) { - unsafe { ptr::write_volatile(self.at(index).cast::>(), task) } - } - - /// Reads a task from the specified `index`. - /// - /// This method might be concurrently called with another `write` at the same index, which is - /// technically speaking a data race and therefore UB. We should use an atomic load here, but - /// that would be more expensive and difficult to implement generically for all types `T`. - /// Hence, as a hack, we use a volatile load instead. - unsafe fn read(&self, index: isize) -> MaybeUninit { - unsafe { ptr::read_volatile(self.at(index).cast::>()) } - } -} - -impl Clone for Buffer { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for Buffer {} - -/// Internal queue data shared between the worker and stealers. -/// -/// The implementation is based on the following work: -/// -/// 1. [Chase and Lev. Dynamic circular work-stealing deque. SPAA 2005.][chase-lev] -/// 2. [Le, Pop, Cohen, and Nardelli. Correct and efficient work-stealing for weak memory models. -/// PPoPP 2013.][weak-mem] -/// 3. [Norris and Demsky. CDSchecker: checking concurrent data structures written with C/C++ -/// atomics. 
OOPSLA 2013.][checker] -/// -/// [chase-lev]: https://dl.acm.org/citation.cfm?id=1073974 -/// [weak-mem]: https://dl.acm.org/citation.cfm?id=2442524 -/// [checker]: https://dl.acm.org/citation.cfm?id=2509514 -struct Inner { - /// The front index. - front: AtomicIsize, - - /// The back index. - back: AtomicIsize, - - /// The underlying buffer. - buffer: CachePadded>>, -} - -impl Drop for Inner { - fn drop(&mut self) { - // Load the back index, front index, and buffer. - let b = *self.back.get_mut(); - let f = *self.front.get_mut(); - - unsafe { - let buffer = self.buffer.load(Ordering::Relaxed, epoch::unprotected()); - - // Go through the buffer from front to back and drop all tasks in the queue. - let mut i = f; - while i != b { - buffer.deref().at(i).drop_in_place(); - i = i.wrapping_add(1); - } - - // Free the memory allocated by the buffer. - buffer.into_owned().into_box().dealloc(); - } - } -} - -/// Worker queue flavor: FIFO or LIFO. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -enum Flavor { - /// The first-in first-out flavor. - Fifo, - - /// The last-in first-out flavor. - Lifo, -} - -/// A worker queue. -/// -/// This is a FIFO or LIFO queue that is owned by a single thread, but other threads may steal -/// tasks from it. Task schedulers typically create a single worker queue per thread. 
-/// -/// # Examples -/// -/// A FIFO worker: -/// -/// ``` -/// use crossbeam_deque::{Steal, Worker}; -/// -/// let w = Worker::new_fifo(); -/// let s = w.stealer(); -/// -/// w.push(1); -/// w.push(2); -/// w.push(3); -/// -/// assert_eq!(s.steal(), Steal::Success(1)); -/// assert_eq!(w.pop(), Some(2)); -/// assert_eq!(w.pop(), Some(3)); -/// ``` -/// -/// A LIFO worker: -/// -/// ``` -/// use crossbeam_deque::{Steal, Worker}; -/// -/// let w = Worker::new_lifo(); -/// let s = w.stealer(); -/// -/// w.push(1); -/// w.push(2); -/// w.push(3); -/// -/// assert_eq!(s.steal(), Steal::Success(1)); -/// assert_eq!(w.pop(), Some(3)); -/// assert_eq!(w.pop(), Some(2)); -/// ``` -pub struct Worker { - /// A reference to the inner representation of the queue. - inner: Arc>>, - - /// A copy of `inner.buffer` for quick access. - buffer: Cell>, - - /// The flavor of the queue. - flavor: Flavor, - - /// Indicates that the worker cannot be shared among threads. - _marker: PhantomData<*mut ()>, // !Send + !Sync -} - -unsafe impl Send for Worker {} - -impl Worker { - /// Creates a FIFO worker queue. - /// - /// Tasks are pushed and popped from opposite ends. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::::new_fifo(); - /// ``` - pub fn new_fifo() -> Self { - let buffer = Buffer::alloc(MIN_CAP); - - let inner = Arc::new(CachePadded::new(Inner { - front: AtomicIsize::new(0), - back: AtomicIsize::new(0), - buffer: CachePadded::new(Atomic::new(buffer)), - })); - - Self { - inner, - buffer: Cell::new(buffer), - flavor: Flavor::Fifo, - _marker: PhantomData, - } - } - - /// Creates a LIFO worker queue. - /// - /// Tasks are pushed and popped from the same end. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::::new_lifo(); - /// ``` - pub fn new_lifo() -> Self { - let buffer = Buffer::alloc(MIN_CAP); - - let inner = Arc::new(CachePadded::new(Inner { - front: AtomicIsize::new(0), - back: AtomicIsize::new(0), - buffer: CachePadded::new(Atomic::new(buffer)), - })); - - Self { - inner, - buffer: Cell::new(buffer), - flavor: Flavor::Lifo, - _marker: PhantomData, - } - } - - /// Creates a stealer for this queue. - /// - /// The returned stealer can be shared among threads and cloned. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::::new_lifo(); - /// let s = w.stealer(); - /// ``` - pub fn stealer(&self) -> Stealer { - Stealer { - inner: self.inner.clone(), - flavor: self.flavor, - } - } - - /// Resizes the internal buffer to the new capacity of `new_cap`. - #[cold] - unsafe fn resize(&self, new_cap: usize) { - // Load the back index, front index, and buffer. - let b = self.inner.back.load(Ordering::Relaxed); - let f = self.inner.front.load(Ordering::Relaxed); - let buffer = self.buffer.get(); - - // Allocate a new buffer and copy data from the old buffer to the new one. - let new = Buffer::alloc(new_cap); - let mut i = f; - while i != b { - unsafe { ptr::copy_nonoverlapping(buffer.at(i), new.at(i), 1) } - i = i.wrapping_add(1); - } - - let guard = &epoch::pin(); - - // Replace the old buffer with the new one. - self.buffer.replace(new); - let old = - self.inner - .buffer - .swap(Owned::new(new).into_shared(guard), Ordering::Release, guard); - - // Destroy the old buffer later. - unsafe { guard.defer_unchecked(move || old.into_owned().into_box().dealloc()) } - - // If the buffer is very large, then flush the thread-local garbage in order to deallocate - // it as soon as possible. 
- if mem::size_of::() * new_cap >= FLUSH_THRESHOLD_BYTES { - guard.flush(); - } - } - - /// Reserves enough capacity so that `reserve_cap` tasks can be pushed without growing the - /// buffer. - fn reserve(&self, reserve_cap: usize) { - if reserve_cap > 0 { - // Compute the current length. - let b = self.inner.back.load(Ordering::Relaxed); - let f = self.inner.front.load(Ordering::SeqCst); - let len = b.wrapping_sub(f) as usize; - - // The current capacity. - let cap = self.buffer.get().cap; - - // Is there enough capacity to push `reserve_cap` tasks? - if cap - len < reserve_cap { - // Keep doubling the capacity as much as is needed. - let mut new_cap = cap * 2; - while new_cap - len < reserve_cap { - new_cap *= 2; - } - - // Resize the buffer. - unsafe { - self.resize(new_cap); - } - } - } - } - - /// Returns `true` if the queue is empty. - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::new_lifo(); - /// - /// assert!(w.is_empty()); - /// w.push(1); - /// assert!(!w.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - let b = self.inner.back.load(Ordering::Relaxed); - let f = self.inner.front.load(Ordering::SeqCst); - b.wrapping_sub(f) <= 0 - } - - /// Returns the number of tasks in the deque. - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::new_lifo(); - /// - /// assert_eq!(w.len(), 0); - /// w.push(1); - /// assert_eq!(w.len(), 1); - /// w.push(1); - /// assert_eq!(w.len(), 2); - /// ``` - pub fn len(&self) -> usize { - let b = self.inner.back.load(Ordering::Relaxed); - let f = self.inner.front.load(Ordering::SeqCst); - b.wrapping_sub(f).max(0) as usize - } - - /// Pushes a task into the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::new_lifo(); - /// w.push(1); - /// w.push(2); - /// ``` - pub fn push(&self, task: T) { - // Load the back index, front index, and buffer. 
- let b = self.inner.back.load(Ordering::Relaxed); - let f = self.inner.front.load(Ordering::Acquire); - let mut buffer = self.buffer.get(); - - // Calculate the length of the queue. - let len = b.wrapping_sub(f); - - // Is the queue full? - if len >= buffer.cap as isize { - // Yes. Grow the underlying buffer. - unsafe { - self.resize(2 * buffer.cap); - } - buffer = self.buffer.get(); - } - - // Write `task` into the slot. - unsafe { - buffer.write(b, MaybeUninit::new(task)); - } - - atomic::fence(Ordering::Release); - - // Increment the back index. - // - // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data - // races because it doesn't understand fences. - self.inner.back.store(b.wrapping_add(1), Ordering::Release); - } - - /// Pops a task from the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::new_fifo(); - /// w.push(1); - /// w.push(2); - /// - /// assert_eq!(w.pop(), Some(1)); - /// assert_eq!(w.pop(), Some(2)); - /// assert_eq!(w.pop(), None); - /// ``` - pub fn pop(&self) -> Option { - // Load the back and front index. - let b = self.inner.back.load(Ordering::Relaxed); - let f = self.inner.front.load(Ordering::Relaxed); - - // Calculate the length of the queue. - let len = b.wrapping_sub(f); - - // Is the queue empty? - if len <= 0 { - return None; - } - - match self.flavor { - // Pop from the front of the queue. - Flavor::Fifo => { - // Try incrementing the front index to pop the task. - let f = self.inner.front.fetch_add(1, Ordering::SeqCst); - let new_f = f.wrapping_add(1); - - if b.wrapping_sub(new_f) < 0 { - self.inner.front.store(f, Ordering::Relaxed); - return None; - } - - unsafe { - // Read the popped task. - let buffer = self.buffer.get(); - let task = buffer.read(f).assume_init(); - - // Shrink the buffer if `len - 1` is less than one fourth of the capacity. 
- if buffer.cap > MIN_CAP && len <= buffer.cap as isize / 4 { - self.resize(buffer.cap / 2); - } - - Some(task) - } - } - - // Pop from the back of the queue. - Flavor::Lifo => { - // Decrement the back index. - let b = b.wrapping_sub(1); - self.inner.back.store(b, Ordering::Relaxed); - - atomic::fence(Ordering::SeqCst); - - // Load the front index. - let f = self.inner.front.load(Ordering::Relaxed); - - // Compute the length after the back index was decremented. - let len = b.wrapping_sub(f); - - if len < 0 { - // The queue is empty. Restore the back index to the original task. - self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); - None - } else { - // Read the task to be popped. - let buffer = self.buffer.get(); - let mut task = unsafe { Some(buffer.read(b)) }; - - // Are we popping the last task from the queue? - if len == 0 { - // Try incrementing the front index. - if self - .inner - .front - .compare_exchange( - f, - f.wrapping_add(1), - Ordering::SeqCst, - Ordering::Relaxed, - ) - .is_err() - { - // Failed. We didn't pop anything. Reset to `None`. - task.take(); - } - - // Restore the back index to the original task. - self.inner.back.store(b.wrapping_add(1), Ordering::Relaxed); - } else { - // Shrink the buffer if `len` is less than one fourth of the capacity. - if buffer.cap > MIN_CAP && len < buffer.cap as isize / 4 { - unsafe { - self.resize(buffer.cap / 2); - } - } - } - - task.map(|t| unsafe { t.assume_init() }) - } - } - } - } -} - -impl fmt::Debug for Worker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Worker { .. }") - } -} - -/// A stealer handle of a worker queue. -/// -/// Stealers can be shared among threads. -/// -/// Task schedulers typically have a single worker queue per worker thread. 
-/// -/// # Examples -/// -/// ``` -/// use crossbeam_deque::{Steal, Worker}; -/// -/// let w = Worker::new_lifo(); -/// w.push(1); -/// w.push(2); -/// -/// let s = w.stealer(); -/// assert_eq!(s.steal(), Steal::Success(1)); -/// assert_eq!(s.steal(), Steal::Success(2)); -/// assert_eq!(s.steal(), Steal::Empty); -/// ``` -pub struct Stealer { - /// A reference to the inner representation of the queue. - inner: Arc>>, - - /// The flavor of the queue. - flavor: Flavor, -} - -unsafe impl Send for Stealer {} -unsafe impl Sync for Stealer {} - -impl Stealer { - /// Returns `true` if the queue is empty. - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::new_lifo(); - /// let s = w.stealer(); - /// - /// assert!(s.is_empty()); - /// w.push(1); - /// assert!(!s.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - let f = self.inner.front.load(Ordering::Acquire); - atomic::fence(Ordering::SeqCst); - let b = self.inner.back.load(Ordering::Acquire); - b.wrapping_sub(f) <= 0 - } - - /// Returns the number of tasks in the deque. - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w = Worker::new_lifo(); - /// let s = w.stealer(); - /// - /// assert_eq!(s.len(), 0); - /// w.push(1); - /// assert_eq!(s.len(), 1); - /// w.push(2); - /// assert_eq!(s.len(), 2); - /// ``` - pub fn len(&self) -> usize { - let f = self.inner.front.load(Ordering::Acquire); - atomic::fence(Ordering::SeqCst); - let b = self.inner.back.load(Ordering::Acquire); - b.wrapping_sub(f).max(0) as usize - } - - /// Steals a task from the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Steal, Worker}; - /// - /// let w = Worker::new_lifo(); - /// w.push(1); - /// w.push(2); - /// - /// let s = w.stealer(); - /// assert_eq!(s.steal(), Steal::Success(1)); - /// assert_eq!(s.steal(), Steal::Success(2)); - /// ``` - pub fn steal(&self) -> Steal { - // Load the front index. 
- let f = self.inner.front.load(Ordering::Acquire); - - // A SeqCst fence is needed here. - // - // If the current thread is already pinned (reentrantly), we must manually issue the - // fence. Otherwise, the following pinning will issue the fence anyway, so we don't - // have to. - if epoch::is_pinned() { - atomic::fence(Ordering::SeqCst); - } - - let guard = &epoch::pin(); - - // Load the back index. - let b = self.inner.back.load(Ordering::Acquire); - - // Is the queue empty? - if b.wrapping_sub(f) <= 0 { - return Steal::Empty; - } - - // Load the buffer and read the task at the front. - let buffer = self.inner.buffer.load(Ordering::Acquire, guard); - let task = unsafe { buffer.deref().read(f) }; - - // Try incrementing the front index to steal the task. - // If the buffer has been swapped or the increment fails, we retry. - if self.inner.buffer.load(Ordering::Acquire, guard) != buffer - || self - .inner - .front - .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) - .is_err() - { - // We didn't steal this task, forget it. - return Steal::Retry; - } - - // Return the stolen task. - Steal::Success(unsafe { task.assume_init() }) - } - - /// Steals a batch of tasks and pushes them into another worker. - /// - /// How many tasks exactly will be stolen is not specified. That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than some constant limit. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w1 = Worker::new_fifo(); - /// w1.push(1); - /// w1.push(2); - /// w1.push(3); - /// w1.push(4); - /// - /// let s = w1.stealer(); - /// let w2 = Worker::new_fifo(); - /// - /// let _ = s.steal_batch(&w2); - /// assert_eq!(w2.pop(), Some(1)); - /// assert_eq!(w2.pop(), Some(2)); - /// ``` - pub fn steal_batch(&self, dest: &Worker) -> Steal<()> { - self.steal_batch_with_limit(dest, MAX_BATCH) - } - - /// Steals no more than `limit` of tasks and pushes them into another worker. - /// - /// How many tasks exactly will be stolen is not specified. That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than the given limit. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Worker; - /// - /// let w1 = Worker::new_fifo(); - /// w1.push(1); - /// w1.push(2); - /// w1.push(3); - /// w1.push(4); - /// w1.push(5); - /// w1.push(6); - /// - /// let s = w1.stealer(); - /// let w2 = Worker::new_fifo(); - /// - /// let _ = s.steal_batch_with_limit(&w2, 2); - /// assert_eq!(w2.pop(), Some(1)); - /// assert_eq!(w2.pop(), Some(2)); - /// assert_eq!(w2.pop(), None); - /// - /// w1.push(7); - /// w1.push(8); - /// // Setting a large limit does not guarantee that all elements will be popped. In this case, - /// // half of the elements are currently popped, but the number of popped elements is considered - /// // an implementation detail that may be changed in the future. - /// let _ = s.steal_batch_with_limit(&w2, usize::MAX); - /// assert_eq!(w2.len(), 3); - /// ``` - pub fn steal_batch_with_limit(&self, dest: &Worker, limit: usize) -> Steal<()> { - assert!(limit > 0); - if Arc::ptr_eq(&self.inner, &dest.inner) { - if dest.is_empty() { - return Steal::Empty; - } else { - return Steal::Success(()); - } - } - - // Load the front index. - let mut f = self.inner.front.load(Ordering::Acquire); - - // A SeqCst fence is needed here. 
- // - // If the current thread is already pinned (reentrantly), we must manually issue the - // fence. Otherwise, the following pinning will issue the fence anyway, so we don't - // have to. - if epoch::is_pinned() { - atomic::fence(Ordering::SeqCst); - } - - let guard = &epoch::pin(); - - // Load the back index. - let b = self.inner.back.load(Ordering::Acquire); - - // Is the queue empty? - let len = b.wrapping_sub(f); - if len <= 0 { - return Steal::Empty; - } - - // Reserve capacity for the stolen batch. - let batch_size = cmp::min((len as usize + 1) / 2, limit); - dest.reserve(batch_size); - let mut batch_size = batch_size as isize; - - // Get the destination buffer and back index. - let dest_buffer = dest.buffer.get(); - let mut dest_b = dest.inner.back.load(Ordering::Relaxed); - - // Load the buffer. - let buffer = self.inner.buffer.load(Ordering::Acquire, guard); - - match self.flavor { - // Steal a batch of tasks from the front at once. - Flavor::Fifo => { - // Copy the batch from the source to the destination buffer. - match dest.flavor { - Flavor::Fifo => { - for i in 0..batch_size { - unsafe { - let task = buffer.deref().read(f.wrapping_add(i)); - dest_buffer.write(dest_b.wrapping_add(i), task); - } - } - } - Flavor::Lifo => { - for i in 0..batch_size { - unsafe { - let task = buffer.deref().read(f.wrapping_add(i)); - dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); - } - } - } - } - - // Try incrementing the front index to steal the batch. - // If the buffer has been swapped or the increment fails, we retry. - if self.inner.buffer.load(Ordering::Acquire, guard) != buffer - || self - .inner - .front - .compare_exchange( - f, - f.wrapping_add(batch_size), - Ordering::SeqCst, - Ordering::Relaxed, - ) - .is_err() - { - return Steal::Retry; - } - - dest_b = dest_b.wrapping_add(batch_size); - } - - // Steal a batch of tasks from the front one by one. 
- Flavor::Lifo => { - // This loop may modify the batch_size, which triggers a clippy lint warning. - // Use a new variable to avoid the warning, and to make it clear we aren't - // modifying the loop exit condition during iteration. - let original_batch_size = batch_size; - - for i in 0..original_batch_size { - // If this is not the first steal, check whether the queue is empty. - if i > 0 { - // We've already got the current front index. Now execute the fence to - // synchronize with other threads. - atomic::fence(Ordering::SeqCst); - - // Load the back index. - let b = self.inner.back.load(Ordering::Acquire); - - // Is the queue empty? - if b.wrapping_sub(f) <= 0 { - batch_size = i; - break; - } - } - - // Read the task at the front. - let task = unsafe { buffer.deref().read(f) }; - - // Try incrementing the front index to steal the task. - // If the buffer has been swapped or the increment fails, we retry. - if self.inner.buffer.load(Ordering::Acquire, guard) != buffer - || self - .inner - .front - .compare_exchange( - f, - f.wrapping_add(1), - Ordering::SeqCst, - Ordering::Relaxed, - ) - .is_err() - { - // We didn't steal this task, forget it and break from the loop. - batch_size = i; - break; - } - - // Write the stolen task into the destination buffer. - unsafe { - dest_buffer.write(dest_b, task); - } - - // Move the source front index and the destination back index one step forward. - f = f.wrapping_add(1); - dest_b = dest_b.wrapping_add(1); - } - - // If we didn't steal anything, the operation needs to be retried. - if batch_size == 0 { - return Steal::Retry; - } - - // If stealing into a FIFO queue, stolen tasks need to be reversed. 
- if dest.flavor == Flavor::Fifo { - for i in 0..batch_size / 2 { - unsafe { - let i1 = dest_b.wrapping_sub(batch_size - i); - let i2 = dest_b.wrapping_sub(i + 1); - let t1 = dest_buffer.read(i1); - let t2 = dest_buffer.read(i2); - dest_buffer.write(i1, t2); - dest_buffer.write(i2, t1); - } - } - } - } - } - - atomic::fence(Ordering::Release); - - // Update the back index in the destination queue. - // - // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data - // races because it doesn't understand fences. - dest.inner.back.store(dest_b, Ordering::Release); - - // Return with success. - Steal::Success(()) - } - - /// Steals a batch of tasks, pushes them into another worker, and pops a task from that worker. - /// - /// How many tasks exactly will be stolen is not specified. That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than some constant limit. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Steal, Worker}; - /// - /// let w1 = Worker::new_fifo(); - /// w1.push(1); - /// w1.push(2); - /// w1.push(3); - /// w1.push(4); - /// - /// let s = w1.stealer(); - /// let w2 = Worker::new_fifo(); - /// - /// assert_eq!(s.steal_batch_and_pop(&w2), Steal::Success(1)); - /// assert_eq!(w2.pop(), Some(2)); - /// ``` - pub fn steal_batch_and_pop(&self, dest: &Worker) -> Steal { - self.steal_batch_with_limit_and_pop(dest, MAX_BATCH) - } - - /// Steals no more than `limit` of tasks, pushes them into another worker, and pops a task from - /// that worker. - /// - /// How many tasks exactly will be stolen is not specified. That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than the given limit. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Steal, Worker}; - /// - /// let w1 = Worker::new_fifo(); - /// w1.push(1); - /// w1.push(2); - /// w1.push(3); - /// w1.push(4); - /// w1.push(5); - /// w1.push(6); - /// - /// let s = w1.stealer(); - /// let w2 = Worker::new_fifo(); - /// - /// assert_eq!(s.steal_batch_with_limit_and_pop(&w2, 2), Steal::Success(1)); - /// assert_eq!(w2.pop(), Some(2)); - /// assert_eq!(w2.pop(), None); - /// - /// w1.push(7); - /// w1.push(8); - /// // Setting a large limit does not guarantee that all elements will be popped. In this case, - /// // half of the elements are currently popped, but the number of popped elements is considered - /// // an implementation detail that may be changed in the future. - /// assert_eq!(s.steal_batch_with_limit_and_pop(&w2, usize::MAX), Steal::Success(3)); - /// assert_eq!(w2.pop(), Some(4)); - /// assert_eq!(w2.pop(), Some(5)); - /// assert_eq!(w2.pop(), None); - /// ``` - pub fn steal_batch_with_limit_and_pop(&self, dest: &Worker, limit: usize) -> Steal { - assert!(limit > 0); - if Arc::ptr_eq(&self.inner, &dest.inner) { - match dest.pop() { - None => return Steal::Empty, - Some(task) => return Steal::Success(task), - } - } - - // Load the front index. - let mut f = self.inner.front.load(Ordering::Acquire); - - // A SeqCst fence is needed here. - // - // If the current thread is already pinned (reentrantly), we must manually issue the - // fence. Otherwise, the following pinning will issue the fence anyway, so we don't - // have to. - if epoch::is_pinned() { - atomic::fence(Ordering::SeqCst); - } - - let guard = &epoch::pin(); - - // Load the back index. - let b = self.inner.back.load(Ordering::Acquire); - - // Is the queue empty? - let len = b.wrapping_sub(f); - if len <= 0 { - return Steal::Empty; - } - - // Reserve capacity for the stolen batch. 
- let batch_size = cmp::min((len as usize - 1) / 2, limit - 1); - dest.reserve(batch_size); - let mut batch_size = batch_size as isize; - - // Get the destination buffer and back index. - let dest_buffer = dest.buffer.get(); - let mut dest_b = dest.inner.back.load(Ordering::Relaxed); - - // Load the buffer - let buffer = self.inner.buffer.load(Ordering::Acquire, guard); - - // Read the task at the front. - let mut task = unsafe { buffer.deref().read(f) }; - - match self.flavor { - // Steal a batch of tasks from the front at once. - Flavor::Fifo => { - // Copy the batch from the source to the destination buffer. - match dest.flavor { - Flavor::Fifo => { - for i in 0..batch_size { - unsafe { - let task = buffer.deref().read(f.wrapping_add(i + 1)); - dest_buffer.write(dest_b.wrapping_add(i), task); - } - } - } - Flavor::Lifo => { - for i in 0..batch_size { - unsafe { - let task = buffer.deref().read(f.wrapping_add(i + 1)); - dest_buffer.write(dest_b.wrapping_add(batch_size - 1 - i), task); - } - } - } - } - - // Try incrementing the front index to steal the task. - // If the buffer has been swapped or the increment fails, we retry. - if self.inner.buffer.load(Ordering::Acquire, guard) != buffer - || self - .inner - .front - .compare_exchange( - f, - f.wrapping_add(batch_size + 1), - Ordering::SeqCst, - Ordering::Relaxed, - ) - .is_err() - { - // We didn't steal this task, forget it. - return Steal::Retry; - } - - dest_b = dest_b.wrapping_add(batch_size); - } - - // Steal a batch of tasks from the front one by one. - Flavor::Lifo => { - // Try incrementing the front index to steal the task. - if self - .inner - .front - .compare_exchange(f, f.wrapping_add(1), Ordering::SeqCst, Ordering::Relaxed) - .is_err() - { - // We didn't steal this task, forget it. - return Steal::Retry; - } - - // Move the front index one step forward. - f = f.wrapping_add(1); - - // Repeat the same procedure for the batch steals. 
- // - // This loop may modify the batch_size, which triggers a clippy lint warning. - // Use a new variable to avoid the warning, and to make it clear we aren't - // modifying the loop exit condition during iteration. - let original_batch_size = batch_size; - for i in 0..original_batch_size { - // We've already got the current front index. Now execute the fence to - // synchronize with other threads. - atomic::fence(Ordering::SeqCst); - - // Load the back index. - let b = self.inner.back.load(Ordering::Acquire); - - // Is the queue empty? - if b.wrapping_sub(f) <= 0 { - batch_size = i; - break; - } - - // Read the task at the front. - let tmp = unsafe { buffer.deref().read(f) }; - - // Try incrementing the front index to steal the task. - // If the buffer has been swapped or the increment fails, we retry. - if self.inner.buffer.load(Ordering::Acquire, guard) != buffer - || self - .inner - .front - .compare_exchange( - f, - f.wrapping_add(1), - Ordering::SeqCst, - Ordering::Relaxed, - ) - .is_err() - { - // We didn't steal this task, forget it and break from the loop. - batch_size = i; - break; - } - - // Write the previously stolen task into the destination buffer. - unsafe { - dest_buffer.write(dest_b, mem::replace(&mut task, tmp)); - } - - // Move the source front index and the destination back index one step forward. - f = f.wrapping_add(1); - dest_b = dest_b.wrapping_add(1); - } - - // If stealing into a FIFO queue, stolen tasks need to be reversed. - if dest.flavor == Flavor::Fifo { - for i in 0..batch_size / 2 { - unsafe { - let i1 = dest_b.wrapping_sub(batch_size - i); - let i2 = dest_b.wrapping_sub(i + 1); - let t1 = dest_buffer.read(i1); - let t2 = dest_buffer.read(i2); - dest_buffer.write(i1, t2); - dest_buffer.write(i2, t1); - } - } - } - } - } - - atomic::fence(Ordering::Release); - - // Update the back index in the destination queue. 
- // - // This ordering could be `Relaxed`, but then thread sanitizer would falsely report data - // races because it doesn't understand fences. - dest.inner.back.store(dest_b, Ordering::Release); - - // Return with success. - Steal::Success(unsafe { task.assume_init() }) - } -} - -impl Clone for Stealer { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - flavor: self.flavor, - } - } -} - -impl fmt::Debug for Stealer { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Stealer { .. }") - } -} - -// Bits indicating the state of a slot: -// * If a task has been written into the slot, `WRITE` is set. -// * If a task has been read from the slot, `READ` is set. -// * If the block is being destroyed, `DESTROY` is set. -const WRITE: usize = 1; -const READ: usize = 2; -const DESTROY: usize = 4; - -// Each block covers one "lap" of indices. -const LAP: usize = 64; -// The maximum number of values a block can hold. -const BLOCK_CAP: usize = LAP - 1; -// How many lower bits are reserved for metadata. -const SHIFT: usize = 1; -// Indicates that the block is not the last one. -const HAS_NEXT: usize = 1; - -/// A slot in a block. -struct Slot { - /// The task. - task: UnsafeCell>, - - /// The state of the slot. - state: AtomicUsize, -} - -impl Slot { - /// Waits until a task is written into the slot. - fn wait_write(&self) { - let backoff = Backoff::new(); - while self.state.load(Ordering::Acquire) & WRITE == 0 { - backoff.snooze(); - } - } -} - -/// A block in a linked list. -/// -/// Each block in the list can hold up to `BLOCK_CAP` values. -struct Block { - /// The next block in the linked list. - next: AtomicPtr>, - - /// Slots for values. - slots: [Slot; BLOCK_CAP], -} - -impl Block { - const LAYOUT: Layout = { - let layout = Layout::new::(); - assert!( - layout.size() != 0, - "Block should never be zero-sized, as it has an AtomicPtr field" - ); - layout - }; - - /// Creates an empty block. 
- fn new() -> Box { - // SAFETY: layout is not zero-sized - let ptr = unsafe { alloc_zeroed(Self::LAYOUT) }; - // Handle allocation failure - if ptr.is_null() { - handle_alloc_error(Self::LAYOUT) - } - // SAFETY: This is safe because: - // [1] `Block::next` (AtomicPtr) may be safely zero initialized. - // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. - // [3] `Slot::task` (UnsafeCell) may be safely zero initialized because it - // holds a MaybeUninit. - // [4] `Slot::state` (AtomicUsize) may be safely zero initialized. - // TODO: unsafe { Box::new_zeroed().assume_init() } - unsafe { Box::from_raw(ptr.cast()) } - } - - /// Waits until the next pointer is set. - fn wait_next(&self) -> *mut Self { - let backoff = Backoff::new(); - loop { - let next = self.next.load(Ordering::Acquire); - if !next.is_null() { - return next; - } - backoff.snooze(); - } - } - - /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. - unsafe fn destroy(this: *mut Self, count: usize) { - // It is not necessary to set the `DESTROY` bit in the last slot because that slot has - // begun destruction of the block. - for i in (0..count).rev() { - let slot = unsafe { (*this).slots.get_unchecked(i) }; - - // Mark the `DESTROY` bit if a thread is still using the slot. - if slot.state.load(Ordering::Acquire) & READ == 0 - && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 - { - // If a thread is still using the slot, it will continue destruction of the block. - return; - } - } - - // No thread is using the block, now it is safe to destroy it. - drop(unsafe { Box::from_raw(this) }); - } -} - -/// A position in a queue. -struct Position { - /// The index in the queue. - index: AtomicUsize, - - /// The block in the linked list. - block: AtomicPtr>, -} - -/// An injector queue. -/// -/// This is a FIFO queue that can be shared among multiple threads. 
Task schedulers typically have -/// a single injector queue, which is the entry point for new tasks. -/// -/// # Examples -/// -/// ``` -/// use crossbeam_deque::{Injector, Steal}; -/// -/// let q = Injector::new(); -/// q.push(1); -/// q.push(2); -/// -/// assert_eq!(q.steal(), Steal::Success(1)); -/// assert_eq!(q.steal(), Steal::Success(2)); -/// assert_eq!(q.steal(), Steal::Empty); -/// ``` -pub struct Injector { - /// The head of the queue. - head: CachePadded>, - - /// The tail of the queue. - tail: CachePadded>, - - /// Indicates that dropping a `Injector` may drop values of type `T`. - _marker: PhantomData, -} - -unsafe impl Send for Injector {} -unsafe impl Sync for Injector {} - -impl Default for Injector { - fn default() -> Self { - let block = Box::into_raw(Block::::new()); - Self { - head: CachePadded::new(Position { - block: AtomicPtr::new(block), - index: AtomicUsize::new(0), - }), - tail: CachePadded::new(Position { - block: AtomicPtr::new(block), - index: AtomicUsize::new(0), - }), - _marker: PhantomData, - } - } -} - -impl Injector { - /// Creates a new injector queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Injector; - /// - /// let q = Injector::::new(); - /// ``` - pub fn new() -> Self { - Self::default() - } - - /// Pushes a task into the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Injector; - /// - /// let w = Injector::new(); - /// w.push(1); - /// w.push(2); - /// ``` - pub fn push(&self, task: T) { - let backoff = Backoff::new(); - let mut tail = self.tail.index.load(Ordering::Acquire); - let mut block = self.tail.block.load(Ordering::Acquire); - let mut next_block = None; - - loop { - // Calculate the offset of the index into the block. - let offset = (tail >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. 
- if offset == BLOCK_CAP { - backoff.snooze(); - tail = self.tail.index.load(Ordering::Acquire); - block = self.tail.block.load(Ordering::Acquire); - continue; - } - - // If we're going to have to install the next block, allocate it in advance in order to - // make the wait for other threads as short as possible. - if offset + 1 == BLOCK_CAP && next_block.is_none() { - next_block = Some(Block::::new()); - } - - let new_tail = tail + (1 << SHIFT); - - // Try advancing the tail forward. - match self.tail.index.compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Acquire, - ) { - Ok(_) => unsafe { - // If we've reached the end of the block, install the next one. - if offset + 1 == BLOCK_CAP { - let next_block = Box::into_raw(next_block.unwrap()); - let next_index = new_tail.wrapping_add(1 << SHIFT); - - self.tail.block.store(next_block, Ordering::Release); - self.tail.index.store(next_index, Ordering::Release); - (*block).next.store(next_block, Ordering::Release); - } - - // Write the task into the slot. - let slot = (*block).slots.get_unchecked(offset); - slot.task.get().write(MaybeUninit::new(task)); - slot.state.fetch_or(WRITE, Ordering::Release); - - return; - }, - Err(t) => { - tail = t; - block = self.tail.block.load(Ordering::Acquire); - backoff.spin(); - } - } - } - } - - /// Steals a task from the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Injector, Steal}; - /// - /// let q = Injector::new(); - /// q.push(1); - /// q.push(2); - /// - /// assert_eq!(q.steal(), Steal::Success(1)); - /// assert_eq!(q.steal(), Steal::Success(2)); - /// assert_eq!(q.steal(), Steal::Empty); - /// ``` - pub fn steal(&self) -> Steal { - let mut head; - let mut block; - let mut offset; - - let backoff = Backoff::new(); - loop { - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - - // Calculate the offset of the index into the block. 
- offset = (head >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - } else { - break; - } - } - - let mut new_head = head + (1 << SHIFT); - - if new_head & HAS_NEXT == 0 { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::Relaxed); - - // If the tail equals the head, that means the queue is empty. - if head >> SHIFT == tail >> SHIFT { - return Steal::Empty; - } - - // If head and tail are not in the same block, set `HAS_NEXT` in head. - if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { - new_head |= HAS_NEXT; - } - } - - // Try moving the head index forward. - if self - .head - .index - .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) - .is_err() - { - return Steal::Retry; - } - - unsafe { - // If we've reached the end of the block, move to the next one. - if offset + 1 == BLOCK_CAP { - let next = (*block).wait_next(); - let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); - if !(*next).next.load(Ordering::Relaxed).is_null() { - next_index |= HAS_NEXT; - } - - self.head.block.store(next, Ordering::Release); - self.head.index.store(next_index, Ordering::Release); - } - - // Read the task. - let slot = (*block).slots.get_unchecked(offset); - slot.wait_write(); - let task = slot.task.get().read().assume_init(); - - // Destroy the block if we've reached the end, or if another thread wanted to destroy - // but couldn't because we were busy reading from the slot. - if (offset + 1 == BLOCK_CAP) - || (slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0) - { - Block::destroy(block, offset); - } - - Steal::Success(task) - } - } - - /// Steals a batch of tasks and pushes them into a worker. - /// - /// How many tasks exactly will be stolen is not specified. That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than some constant limit. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Injector, Worker}; - /// - /// let q = Injector::new(); - /// q.push(1); - /// q.push(2); - /// q.push(3); - /// q.push(4); - /// - /// let w = Worker::new_fifo(); - /// let _ = q.steal_batch(&w); - /// assert_eq!(w.pop(), Some(1)); - /// assert_eq!(w.pop(), Some(2)); - /// ``` - pub fn steal_batch(&self, dest: &Worker) -> Steal<()> { - self.steal_batch_with_limit(dest, MAX_BATCH) - } - - /// Steals no more than of tasks and pushes them into a worker. - /// - /// How many tasks exactly will be stolen is not specified. That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than some constant limit. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Injector, Worker}; - /// - /// let q = Injector::new(); - /// q.push(1); - /// q.push(2); - /// q.push(3); - /// q.push(4); - /// q.push(5); - /// q.push(6); - /// - /// let w = Worker::new_fifo(); - /// let _ = q.steal_batch_with_limit(&w, 2); - /// assert_eq!(w.pop(), Some(1)); - /// assert_eq!(w.pop(), Some(2)); - /// assert_eq!(w.pop(), None); - /// - /// q.push(7); - /// q.push(8); - /// // Setting a large limit does not guarantee that all elements will be popped. In this case, - /// // half of the elements are currently popped, but the number of popped elements is considered - /// // an implementation detail that may be changed in the future. - /// let _ = q.steal_batch_with_limit(&w, usize::MAX); - /// assert_eq!(w.len(), 3); - /// ``` - pub fn steal_batch_with_limit(&self, dest: &Worker, limit: usize) -> Steal<()> { - assert!(limit > 0); - let mut head; - let mut block; - let mut offset; - - let backoff = Backoff::new(); - loop { - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - - // Calculate the offset of the index into the block. 
- offset = (head >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - } else { - break; - } - } - - let mut new_head = head; - let advance; - - if new_head & HAS_NEXT == 0 { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::Relaxed); - - // If the tail equals the head, that means the queue is empty. - if head >> SHIFT == tail >> SHIFT { - return Steal::Empty; - } - - // If head and tail are not in the same block, set `HAS_NEXT` in head. Also, calculate - // the right batch size to steal. - if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { - new_head |= HAS_NEXT; - // We can steal all tasks till the end of the block. - advance = (BLOCK_CAP - offset).min(limit); - } else { - let len = (tail - head) >> SHIFT; - // Steal half of the available tasks. - advance = ((len + 1) / 2).min(limit); - } - } else { - // We can steal all tasks till the end of the block. - advance = (BLOCK_CAP - offset).min(limit); - } - - new_head += advance << SHIFT; - let new_offset = offset + advance; - - // Try moving the head index forward. - if self - .head - .index - .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) - .is_err() - { - return Steal::Retry; - } - - // Reserve capacity for the stolen batch. - let batch_size = new_offset - offset; - dest.reserve(batch_size); - - // Get the destination buffer and back index. - let dest_buffer = dest.buffer.get(); - let dest_b = dest.inner.back.load(Ordering::Relaxed); - - unsafe { - // If we've reached the end of the block, move to the next one. 
- if new_offset == BLOCK_CAP { - let next = (*block).wait_next(); - let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); - if !(*next).next.load(Ordering::Relaxed).is_null() { - next_index |= HAS_NEXT; - } - - self.head.block.store(next, Ordering::Release); - self.head.index.store(next_index, Ordering::Release); - } - - // Copy values from the injector into the destination queue. - match dest.flavor { - Flavor::Fifo => { - for i in 0..batch_size { - // Read the task. - let slot = (*block).slots.get_unchecked(offset + i); - slot.wait_write(); - let task = slot.task.get().read(); - - // Write it into the destination queue. - dest_buffer.write(dest_b.wrapping_add(i as isize), task); - } - } - - Flavor::Lifo => { - for i in 0..batch_size { - // Read the task. - let slot = (*block).slots.get_unchecked(offset + i); - slot.wait_write(); - let task = slot.task.get().read(); - - // Write it into the destination queue. - dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); - } - } - } - - atomic::fence(Ordering::Release); - - // Update the back index in the destination queue. - // - // This ordering could be `Relaxed`, but then thread sanitizer would falsely report - // data races because it doesn't understand fences. - dest.inner - .back - .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); - - // Destroy the block if we've reached the end, or if another thread wanted to destroy - // but couldn't because we were busy reading from the slot. - if new_offset == BLOCK_CAP { - Block::destroy(block, offset); - } else { - for i in offset..new_offset { - let slot = (*block).slots.get_unchecked(i); - - if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { - Block::destroy(block, offset); - break; - } - } - } - - Steal::Success(()) - } - } - - /// Steals a batch of tasks, pushes them into a worker, and pops a task from that worker. - /// - /// How many tasks exactly will be stolen is not specified. 
That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than some constant limit. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Injector, Steal, Worker}; - /// - /// let q = Injector::new(); - /// q.push(1); - /// q.push(2); - /// q.push(3); - /// q.push(4); - /// - /// let w = Worker::new_fifo(); - /// assert_eq!(q.steal_batch_and_pop(&w), Steal::Success(1)); - /// assert_eq!(w.pop(), Some(2)); - /// ``` - pub fn steal_batch_and_pop(&self, dest: &Worker) -> Steal { - // TODO: we use `MAX_BATCH + 1` as the hard limit for Injecter as the performance is slightly - // better, but we may change it in the future to be compatible with the same method in Stealer. - self.steal_batch_with_limit_and_pop(dest, MAX_BATCH + 1) - } - - /// Steals no more than `limit` of tasks, pushes them into a worker, and pops a task from that worker. - /// - /// How many tasks exactly will be stolen is not specified. That said, this method will try to - /// steal around half of the tasks in the queue, but also not more than the given limit. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::{Injector, Steal, Worker}; - /// - /// let q = Injector::new(); - /// q.push(1); - /// q.push(2); - /// q.push(3); - /// q.push(4); - /// q.push(5); - /// q.push(6); - /// - /// let w = Worker::new_fifo(); - /// assert_eq!(q.steal_batch_with_limit_and_pop(&w, 2), Steal::Success(1)); - /// assert_eq!(w.pop(), Some(2)); - /// assert_eq!(w.pop(), None); - /// - /// q.push(7); - /// // Setting a large limit does not guarantee that all elements will be popped. In this case, - /// // half of the elements are currently popped, but the number of popped elements is considered - /// // an implementation detail that may be changed in the future. 
- /// assert_eq!(q.steal_batch_with_limit_and_pop(&w, usize::MAX), Steal::Success(3)); - /// assert_eq!(w.pop(), Some(4)); - /// assert_eq!(w.pop(), Some(5)); - /// assert_eq!(w.pop(), None); - /// ``` - pub fn steal_batch_with_limit_and_pop(&self, dest: &Worker, limit: usize) -> Steal { - assert!(limit > 0); - let mut head; - let mut block; - let mut offset; - - let backoff = Backoff::new(); - loop { - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - - // Calculate the offset of the index into the block. - offset = (head >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - } else { - break; - } - } - - let mut new_head = head; - let advance; - - if new_head & HAS_NEXT == 0 { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::Relaxed); - - // If the tail equals the head, that means the queue is empty. - if head >> SHIFT == tail >> SHIFT { - return Steal::Empty; - } - - // If head and tail are not in the same block, set `HAS_NEXT` in head. - if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { - new_head |= HAS_NEXT; - // We can steal all tasks till the end of the block. - advance = (BLOCK_CAP - offset).min(limit); - } else { - let len = (tail - head) >> SHIFT; - // Steal half of the available tasks. - advance = ((len + 1) / 2).min(limit); - } - } else { - // We can steal all tasks till the end of the block. - advance = (BLOCK_CAP - offset).min(limit); - } - - new_head += advance << SHIFT; - let new_offset = offset + advance; - - // Try moving the head index forward. - if self - .head - .index - .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Acquire) - .is_err() - { - return Steal::Retry; - } - - // Reserve capacity for the stolen batch. - let batch_size = new_offset - offset - 1; - dest.reserve(batch_size); - - // Get the destination buffer and back index. 
- let dest_buffer = dest.buffer.get(); - let dest_b = dest.inner.back.load(Ordering::Relaxed); - - unsafe { - // If we've reached the end of the block, move to the next one. - if new_offset == BLOCK_CAP { - let next = (*block).wait_next(); - let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); - if !(*next).next.load(Ordering::Relaxed).is_null() { - next_index |= HAS_NEXT; - } - - self.head.block.store(next, Ordering::Release); - self.head.index.store(next_index, Ordering::Release); - } - - // Read the task. - let slot = (*block).slots.get_unchecked(offset); - slot.wait_write(); - let task = slot.task.get().read(); - - match dest.flavor { - Flavor::Fifo => { - // Copy values from the injector into the destination queue. - for i in 0..batch_size { - // Read the task. - let slot = (*block).slots.get_unchecked(offset + i + 1); - slot.wait_write(); - let task = slot.task.get().read(); - - // Write it into the destination queue. - dest_buffer.write(dest_b.wrapping_add(i as isize), task); - } - } - - Flavor::Lifo => { - // Copy values from the injector into the destination queue. - for i in 0..batch_size { - // Read the task. - let slot = (*block).slots.get_unchecked(offset + i + 1); - slot.wait_write(); - let task = slot.task.get().read(); - - // Write it into the destination queue. - dest_buffer.write(dest_b.wrapping_add((batch_size - 1 - i) as isize), task); - } - } - } - - atomic::fence(Ordering::Release); - - // Update the back index in the destination queue. - // - // This ordering could be `Relaxed`, but then thread sanitizer would falsely report - // data races because it doesn't understand fences. - dest.inner - .back - .store(dest_b.wrapping_add(batch_size as isize), Ordering::Release); - - // Destroy the block if we've reached the end, or if another thread wanted to destroy - // but couldn't because we were busy reading from the slot. 
- if new_offset == BLOCK_CAP { - Block::destroy(block, offset); - } else { - for i in offset..new_offset { - let slot = (*block).slots.get_unchecked(i); - - if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { - Block::destroy(block, offset); - break; - } - } - } - - Steal::Success(task.assume_init()) - } - } - - /// Returns `true` if the queue is empty. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Injector; - /// - /// let q = Injector::new(); - /// - /// assert!(q.is_empty()); - /// q.push(1); - /// assert!(!q.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - let head = self.head.index.load(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::SeqCst); - head >> SHIFT == tail >> SHIFT - } - - /// Returns the number of tasks in the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Injector; - /// - /// let q = Injector::new(); - /// - /// assert_eq!(q.len(), 0); - /// q.push(1); - /// assert_eq!(q.len(), 1); - /// q.push(1); - /// assert_eq!(q.len(), 2); - /// ``` - pub fn len(&self) -> usize { - loop { - // Load the tail index, then load the head index. - let mut tail = self.tail.index.load(Ordering::SeqCst); - let mut head = self.head.index.load(Ordering::SeqCst); - - // If the tail index didn't change, we've got consistent indices to work with. - if self.tail.index.load(Ordering::SeqCst) == tail { - // Erase the lower bits. - tail &= !((1 << SHIFT) - 1); - head &= !((1 << SHIFT) - 1); - - // Fix up indices if they fall onto block ends. - if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { - tail = tail.wrapping_add(1 << SHIFT); - } - if (head >> SHIFT) & (LAP - 1) == LAP - 1 { - head = head.wrapping_add(1 << SHIFT); - } - - // Rotate indices so that head falls into the first block. - let lap = (head >> SHIFT) / LAP; - tail = tail.wrapping_sub((lap * LAP) << SHIFT); - head = head.wrapping_sub((lap * LAP) << SHIFT); - - // Remove the lower bits. 
- tail >>= SHIFT; - head >>= SHIFT; - - // Return the difference minus the number of blocks between tail and head. - return tail - head - tail / LAP; - } - } - } -} - -impl Drop for Injector { - fn drop(&mut self) { - let mut head = *self.head.index.get_mut(); - let mut tail = *self.tail.index.get_mut(); - let mut block = *self.head.block.get_mut(); - - // Erase the lower bits. - head &= !((1 << SHIFT) - 1); - tail &= !((1 << SHIFT) - 1); - - unsafe { - // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. - while head != tail { - let offset = (head >> SHIFT) % LAP; - - if offset < BLOCK_CAP { - // Drop the task in the slot. - let slot = (*block).slots.get_unchecked(offset); - (*slot.task.get()).assume_init_drop(); - } else { - // Deallocate the block and move to the next one. - let next = *(*block).next.get_mut(); - drop(Box::from_raw(block)); - block = next; - } - - head = head.wrapping_add(1 << SHIFT); - } - - // Deallocate the last remaining block. - drop(Box::from_raw(block)); - } - } -} - -impl fmt::Debug for Injector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Worker { .. }") - } -} - -/// Possible outcomes of a steal operation. -/// -/// # Examples -/// -/// There are lots of ways to chain results of steal operations together: -/// -/// ``` -/// use crossbeam_deque::Steal::{self, Empty, Retry, Success}; -/// -/// let collect = |v: Vec>| v.into_iter().collect::>(); -/// -/// assert_eq!(collect(vec![Empty, Empty, Empty]), Empty); -/// assert_eq!(collect(vec![Empty, Retry, Empty]), Retry); -/// assert_eq!(collect(vec![Retry, Success(1), Empty]), Success(1)); -/// -/// assert_eq!(collect(vec![Empty, Empty]).or_else(|| Retry), Retry); -/// assert_eq!(collect(vec![Retry, Empty]).or_else(|| Success(1)), Success(1)); -/// ``` -#[must_use] -#[derive(PartialEq, Eq, Copy, Clone)] -pub enum Steal { - /// The queue was empty at the time of stealing. - Empty, - - /// At least one task was successfully stolen. 
- Success(T), - - /// The steal operation needs to be retried. - Retry, -} - -impl Steal { - /// Returns `true` if the queue was empty at the time of stealing. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Steal::{Empty, Retry, Success}; - /// - /// assert!(!Success(7).is_empty()); - /// assert!(!Retry::.is_empty()); - /// - /// assert!(Empty::.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - matches!(self, Self::Empty) - } - - /// Returns `true` if at least one task was stolen. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Steal::{Empty, Retry, Success}; - /// - /// assert!(!Empty::.is_success()); - /// assert!(!Retry::.is_success()); - /// - /// assert!(Success(7).is_success()); - /// ``` - pub fn is_success(&self) -> bool { - matches!(self, Self::Success(_)) - } - - /// Returns `true` if the steal operation needs to be retried. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Steal::{Empty, Retry, Success}; - /// - /// assert!(!Empty::.is_retry()); - /// assert!(!Success(7).is_retry()); - /// - /// assert!(Retry::.is_retry()); - /// ``` - pub fn is_retry(&self) -> bool { - matches!(self, Self::Retry) - } - - /// Returns the result of the operation, if successful. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Steal::{Empty, Retry, Success}; - /// - /// assert_eq!(Empty::.success(), None); - /// assert_eq!(Retry::.success(), None); - /// - /// assert_eq!(Success(7).success(), Some(7)); - /// ``` - pub fn success(self) -> Option { - match self { - Self::Success(res) => Some(res), - _ => None, - } - } - - /// If no task was stolen, attempts another steal operation. - /// - /// Returns this steal result if it is `Success`. Otherwise, closure `f` is invoked and then: - /// - /// * If the second steal resulted in `Success`, it is returned. - /// * If both steals were unsuccessful but any resulted in `Retry`, then `Retry` is returned. 
- /// * If both resulted in `None`, then `None` is returned. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_deque::Steal::{Empty, Retry, Success}; - /// - /// assert_eq!(Success(1).or_else(|| Success(2)), Success(1)); - /// assert_eq!(Retry.or_else(|| Success(2)), Success(2)); - /// - /// assert_eq!(Retry.or_else(|| Empty), Retry::); - /// assert_eq!(Empty.or_else(|| Retry), Retry::); - /// - /// assert_eq!(Empty.or_else(|| Empty), Empty::); - /// ``` - pub fn or_else(self, f: F) -> Self - where - F: FnOnce() -> Self, - { - match self { - Self::Empty => f(), - Self::Success(_) => self, - Self::Retry => { - if let Self::Success(res) = f() { - Self::Success(res) - } else { - Self::Retry - } - } - } - } -} - -impl fmt::Debug for Steal { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Empty => f.pad("Empty"), - Self::Success(_) => f.pad("Success(..)"), - Self::Retry => f.pad("Retry"), - } - } -} - -impl FromIterator for Steal { - /// Consumes items until a `Success` is found and returns it. - /// - /// If no `Success` was found, but there was at least one `Retry`, then returns `Retry`. - /// Otherwise, `Empty` is returned. - fn from_iter(iter: I) -> Self - where - I: IntoIterator, - { - let mut retry = false; - for s in iter { - match &s { - Self::Empty => {} - Self::Success(_) => return s, - Self::Retry => retry = true, - } - } - - if retry { - Self::Retry - } else { - Self::Empty - } - } -} diff --git a/crossbeam-deque/src/lib.rs b/crossbeam-deque/src/lib.rs deleted file mode 100644 index cbe7cfe90..000000000 --- a/crossbeam-deque/src/lib.rs +++ /dev/null @@ -1,101 +0,0 @@ -//! Concurrent work-stealing deques. -//! -//! These data structures are most commonly used in work-stealing schedulers. The typical setup -//! involves a number of threads, each having its own FIFO or LIFO queue (*worker*). There is also -//! one global FIFO queue (*injector*) and a list of references to *worker* queues that are able to -//! 
steal tasks (*stealers*). -//! -//! We spawn a new task onto the scheduler by pushing it into the *injector* queue. Each worker -//! thread waits in a loop until it finds the next task to run and then runs it. To find a task, it -//! first looks into its local *worker* queue, and then into the *injector* and *stealers*. -//! -//! # Queues -//! -//! [`Injector`] is a FIFO queue, where tasks are pushed and stolen from opposite ends. It is -//! shared among threads and is usually the entry point for new tasks. -//! -//! [`Worker`] has two constructors: -//! -//! * [`new_fifo()`] - Creates a FIFO queue, in which tasks are pushed and popped from opposite -//! ends. -//! * [`new_lifo()`] - Creates a LIFO queue, in which tasks are pushed and popped from the same -//! end. -//! -//! Each [`Worker`] is owned by a single thread and supports only push and pop operations. -//! -//! Method [`stealer()`] creates a [`Stealer`] that may be shared among threads and can only steal -//! tasks from its [`Worker`]. Tasks are stolen from the end opposite to where they get pushed. -//! -//! # Stealing -//! -//! Steal operations come in three flavors: -//! -//! 1. [`steal()`] - Steals one task. -//! 2. [`steal_batch()`] - Steals a batch of tasks and moves them into another worker. -//! 3. [`steal_batch_and_pop()`] - Steals a batch of tasks, moves them into another queue, and pops -//! one task from that worker. -//! -//! In contrast to push and pop operations, stealing can spuriously fail with [`Steal::Retry`], in -//! which case the steal operation needs to be retried. -//! -//! # Examples -//! -//! Suppose a thread in a work-stealing scheduler is idle and looking for the next task to run. To -//! find an available task, it might do the following: -//! -//! 1. Try popping one task from the local worker queue. -//! 2. Try stealing a batch of tasks from the global injector queue. -//! 3. Try stealing one task from another thread using the stealer list. -//! -//! 
An implementation of this work-stealing strategy: -//! -//! ``` -//! use crossbeam_deque::{Injector, Stealer, Worker}; -//! use std::iter; -//! -//! fn find_task( -//! local: &Worker, -//! global: &Injector, -//! stealers: &[Stealer], -//! ) -> Option { -//! // Pop a task from the local queue, if not empty. -//! local.pop().or_else(|| { -//! // Otherwise, we need to look for a task elsewhere. -//! iter::repeat_with(|| { -//! // Try stealing a batch of tasks from the global queue. -//! global.steal_batch_and_pop(local) -//! // Or try stealing a task from one of the other threads. -//! .or_else(|| stealers.iter().map(|s| s.steal()).collect()) -//! }) -//! // Loop while no task was stolen and any steal operation needs to be retried. -//! .find(|s| !s.is_retry()) -//! // Extract the stolen task, if there is one. -//! .and_then(|s| s.success()) -//! }) -//! } -//! ``` -//! -//! [`new_fifo()`]: Worker::new_fifo -//! [`new_lifo()`]: Worker::new_lifo -//! [`stealer()`]: Worker::stealer -//! [`steal()`]: Stealer::steal -//! [`steal_batch()`]: Stealer::steal_batch -//! 
[`steal_batch_and_pop()`]: Stealer::steal_batch_and_pop - -#![no_std] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![warn(missing_docs, unsafe_op_in_unsafe_fn)] - -#[cfg(feature = "std")] -extern crate std; - -#[cfg(feature = "std")] -mod deque; -#[cfg(feature = "std")] -pub use crate::deque::{Injector, Steal, Stealer, Worker}; diff --git a/crossbeam-deque/tests/fifo.rs b/crossbeam-deque/tests/fifo.rs deleted file mode 100644 index f98737b58..000000000 --- a/crossbeam-deque/tests/fifo.rs +++ /dev/null @@ -1,357 +0,0 @@ -use std::sync::atomic::Ordering::SeqCst; -use std::sync::atomic::{AtomicBool, AtomicUsize}; -use std::sync::{Arc, Mutex}; - -use crossbeam_deque::Steal::{Empty, Success}; -use crossbeam_deque::Worker; -use crossbeam_utils::thread::scope; -use rand::Rng; - -#[test] -fn smoke() { - let w = Worker::new_fifo(); - let s = w.stealer(); - assert_eq!(w.pop(), None); - assert_eq!(s.steal(), Empty); - - w.push(1); - assert_eq!(w.pop(), Some(1)); - assert_eq!(w.pop(), None); - assert_eq!(s.steal(), Empty); - - w.push(2); - assert_eq!(s.steal(), Success(2)); - assert_eq!(s.steal(), Empty); - assert_eq!(w.pop(), None); - - w.push(3); - w.push(4); - w.push(5); - assert_eq!(s.steal(), Success(3)); - assert_eq!(s.steal(), Success(4)); - assert_eq!(s.steal(), Success(5)); - assert_eq!(s.steal(), Empty); - - w.push(6); - w.push(7); - w.push(8); - w.push(9); - assert_eq!(w.pop(), Some(6)); - assert_eq!(s.steal(), Success(7)); - assert_eq!(w.pop(), Some(8)); - assert_eq!(w.pop(), Some(9)); - assert_eq!(w.pop(), None); -} - -#[test] -fn is_empty() { - let w = Worker::new_fifo(); - let s = w.stealer(); - - assert!(w.is_empty()); - w.push(1); - assert!(!w.is_empty()); - w.push(2); - assert!(!w.is_empty()); - let _ = w.pop(); - assert!(!w.is_empty()); - let _ = w.pop(); - assert!(w.is_empty()); - - assert!(s.is_empty()); - w.push(1); - 
assert!(!s.is_empty()); - w.push(2); - assert!(!s.is_empty()); - let _ = s.steal(); - assert!(!s.is_empty()); - let _ = s.steal(); - assert!(s.is_empty()); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const STEPS: usize = 500; - #[cfg(not(miri))] - const STEPS: usize = 50_000; - - let w = Worker::new_fifo(); - let s = w.stealer(); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..STEPS { - loop { - if let Success(v) = s.steal() { - assert_eq!(i, v); - break; - } - } - } - - assert_eq!(s.steal(), Empty); - }); - - for i in 0..STEPS { - w.push(i); - } - }) - .unwrap(); -} - -#[test] -fn stampede() { - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - - let w = Worker::new_fifo(); - - for i in 0..COUNT { - w.push(Box::new(i + 1)); - } - let remaining = Arc::new(AtomicUsize::new(COUNT)); - - scope(|scope| { - for _ in 0..THREADS { - let s = w.stealer(); - let remaining = remaining.clone(); - - scope.spawn(move |_| { - let mut last = 0; - while remaining.load(SeqCst) > 0 { - if let Success(x) = s.steal() { - assert!(last < *x); - last = *x; - remaining.fetch_sub(1, SeqCst); - } - } - }); - } - - let mut last = 0; - while remaining.load(SeqCst) > 0 { - if let Some(x) = w.pop() { - assert!(last < *x); - last = *x; - remaining.fetch_sub(1, SeqCst); - } - } - }) - .unwrap(); -} - -#[test] -fn stress() { - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - - let w = Worker::new_fifo(); - let done = Arc::new(AtomicBool::new(false)); - let hits = Arc::new(AtomicUsize::new(0)); - - scope(|scope| { - for _ in 0..THREADS { - let s = w.stealer(); - let done = done.clone(); - let hits = hits.clone(); - - scope.spawn(move |_| { - let w2 = Worker::new_fifo(); - - while !done.load(SeqCst) { - if let Success(_) = s.steal() { - hits.fetch_add(1, SeqCst); - } - - let _ = s.steal_batch(&w2); - - if let Success(_) = s.steal_batch_and_pop(&w2) { - 
hits.fetch_add(1, SeqCst); - } - - while w2.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - }); - } - - let mut rng = rand::thread_rng(); - let mut expected = 0; - while expected < COUNT { - if rng.gen_range(0..3) == 0 { - while w.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } else { - w.push(expected); - expected += 1; - } - } - - while hits.load(SeqCst) < COUNT { - while w.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - done.store(true, SeqCst); - }) - .unwrap(); -} - -#[cfg_attr(miri, ignore)] // Miri is too slow -#[test] -fn no_starvation() { - const THREADS: usize = 8; - const COUNT: usize = 50_000; - - let w = Worker::new_fifo(); - let done = Arc::new(AtomicBool::new(false)); - let mut all_hits = Vec::new(); - - scope(|scope| { - for _ in 0..THREADS { - let s = w.stealer(); - let done = done.clone(); - let hits = Arc::new(AtomicUsize::new(0)); - all_hits.push(hits.clone()); - - scope.spawn(move |_| { - let w2 = Worker::new_fifo(); - - while !done.load(SeqCst) { - if let Success(_) = s.steal() { - hits.fetch_add(1, SeqCst); - } - - let _ = s.steal_batch(&w2); - - if let Success(_) = s.steal_batch_and_pop(&w2) { - hits.fetch_add(1, SeqCst); - } - - while w2.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - }); - } - - let mut rng = rand::thread_rng(); - let mut my_hits = 0; - loop { - for i in 0..rng.gen_range(0..COUNT) { - if rng.gen_range(0..3) == 0 && my_hits == 0 { - while w.pop().is_some() { - my_hits += 1; - } - } else { - w.push(i); - } - } - - if my_hits > 0 && all_hits.iter().all(|h| h.load(SeqCst) > 0) { - break; - } - } - done.store(true, SeqCst); - }) - .unwrap(); -} - -#[test] -fn destructors() { - #[cfg(miri)] - const THREADS: usize = 2; - #[cfg(not(miri))] - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - #[cfg(miri)] - const STEPS: usize = 100; - #[cfg(not(miri))] - const STEPS: usize = 1000; - - struct Elem(usize, Arc>>); - - impl Drop for 
Elem { - fn drop(&mut self) { - self.1.lock().unwrap().push(self.0); - } - } - - let w = Worker::new_fifo(); - let dropped = Arc::new(Mutex::new(Vec::new())); - let remaining = Arc::new(AtomicUsize::new(COUNT)); - - for i in 0..COUNT { - w.push(Elem(i, dropped.clone())); - } - - scope(|scope| { - for _ in 0..THREADS { - let remaining = remaining.clone(); - let s = w.stealer(); - - scope.spawn(move |_| { - let w2 = Worker::new_fifo(); - let mut cnt = 0; - - while cnt < STEPS { - if let Success(_) = s.steal() { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - - let _ = s.steal_batch(&w2); - - if let Success(_) = s.steal_batch_and_pop(&w2) { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - - while w2.pop().is_some() { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - } - }); - } - - for _ in 0..STEPS { - if w.pop().is_some() { - remaining.fetch_sub(1, SeqCst); - } - } - }) - .unwrap(); - - let rem = remaining.load(SeqCst); - assert!(rem > 0); - - { - let mut v = dropped.lock().unwrap(); - assert_eq!(v.len(), COUNT - rem); - v.clear(); - } - - drop(w); - - { - let mut v = dropped.lock().unwrap(); - assert_eq!(v.len(), rem); - v.sort_unstable(); - for pair in v.windows(2) { - assert_eq!(pair[0] + 1, pair[1]); - } - } -} diff --git a/crossbeam-deque/tests/injector.rs b/crossbeam-deque/tests/injector.rs deleted file mode 100644 index 5f6c3e98e..000000000 --- a/crossbeam-deque/tests/injector.rs +++ /dev/null @@ -1,391 +0,0 @@ -use std::sync::atomic::Ordering::SeqCst; -use std::sync::atomic::{AtomicBool, AtomicUsize}; -use std::sync::{Arc, Mutex}; - -use crossbeam_deque::Steal::{Empty, Success}; -use crossbeam_deque::{Injector, Worker}; -use crossbeam_utils::thread::scope; -use rand::Rng; - -#[test] -fn smoke() { - let q = Injector::new(); - assert_eq!(q.steal(), Empty); - - q.push(1); - q.push(2); - assert_eq!(q.steal(), Success(1)); - assert_eq!(q.steal(), Success(2)); - assert_eq!(q.steal(), Empty); - - q.push(3); - assert_eq!(q.steal(), Success(3)); - 
assert_eq!(q.steal(), Empty); -} - -#[test] -fn is_empty() { - let q = Injector::new(); - assert!(q.is_empty()); - - q.push(1); - assert!(!q.is_empty()); - q.push(2); - assert!(!q.is_empty()); - - let _ = q.steal(); - assert!(!q.is_empty()); - let _ = q.steal(); - assert!(q.is_empty()); - - q.push(3); - assert!(!q.is_empty()); - let _ = q.steal(); - assert!(q.is_empty()); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let q = Injector::new(); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - loop { - if let Success(v) = q.steal() { - assert_eq!(i, v); - break; - } - #[cfg(miri)] - std::hint::spin_loop(); - } - } - - assert_eq!(q.steal(), Empty); - }); - - for i in 0..COUNT { - q.push(i); - } - }) - .unwrap(); -} - -#[test] -fn mpmc() { - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let q = Injector::new(); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - q.push(i); - } - }); - } - - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - loop { - if let Success(n) = q.steal() { - v[n].fetch_add(1, SeqCst); - break; - } - #[cfg(miri)] - std::hint::spin_loop(); - } - } - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(SeqCst), THREADS); - } -} - -#[test] -fn stampede() { - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - - let q = Injector::new(); - - for i in 0..COUNT { - q.push(Box::new(i + 1)); - } - let remaining = Arc::new(AtomicUsize::new(COUNT)); - - scope(|scope| { - for _ in 0..THREADS { - let remaining = remaining.clone(); - let q = &q; - - scope.spawn(move |_| { - let mut last = 0; - while remaining.load(SeqCst) > 0 { - if let Success(x) = q.steal() { - assert!(last < *x); - last = *x; - 
remaining.fetch_sub(1, SeqCst); - } - } - }); - } - - let mut last = 0; - while remaining.load(SeqCst) > 0 { - if let Success(x) = q.steal() { - assert!(last < *x); - last = *x; - remaining.fetch_sub(1, SeqCst); - } - } - }) - .unwrap(); -} - -#[test] -fn stress() { - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - - let q = Injector::new(); - let done = Arc::new(AtomicBool::new(false)); - let hits = Arc::new(AtomicUsize::new(0)); - - scope(|scope| { - for _ in 0..THREADS { - let done = done.clone(); - let hits = hits.clone(); - let q = &q; - - scope.spawn(move |_| { - let w2 = Worker::new_fifo(); - - while !done.load(SeqCst) { - if let Success(_) = q.steal() { - hits.fetch_add(1, SeqCst); - } - - let _ = q.steal_batch(&w2); - - if let Success(_) = q.steal_batch_and_pop(&w2) { - hits.fetch_add(1, SeqCst); - } - - while w2.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - }); - } - - let mut rng = rand::thread_rng(); - let mut expected = 0; - while expected < COUNT { - if rng.gen_range(0..3) == 0 { - while let Success(_) = q.steal() { - hits.fetch_add(1, SeqCst); - } - } else { - q.push(expected); - expected += 1; - } - } - - while hits.load(SeqCst) < COUNT { - while let Success(_) = q.steal() { - hits.fetch_add(1, SeqCst); - } - } - done.store(true, SeqCst); - }) - .unwrap(); -} - -#[cfg_attr(miri, ignore)] // Miri is too slow -#[test] -fn no_starvation() { - const THREADS: usize = 8; - const COUNT: usize = 50_000; - - let q = Injector::new(); - let done = Arc::new(AtomicBool::new(false)); - let mut all_hits = Vec::new(); - - scope(|scope| { - for _ in 0..THREADS { - let done = done.clone(); - let hits = Arc::new(AtomicUsize::new(0)); - all_hits.push(hits.clone()); - let q = &q; - - scope.spawn(move |_| { - let w2 = Worker::new_fifo(); - - while !done.load(SeqCst) { - if let Success(_) = q.steal() { - hits.fetch_add(1, SeqCst); - } - - let _ = q.steal_batch(&w2); - - if let Success(_) 
= q.steal_batch_and_pop(&w2) { - hits.fetch_add(1, SeqCst); - } - - while w2.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - }); - } - - let mut rng = rand::thread_rng(); - let mut my_hits = 0; - loop { - for i in 0..rng.gen_range(0..COUNT) { - if rng.gen_range(0..3) == 0 && my_hits == 0 { - while let Success(_) = q.steal() { - my_hits += 1; - } - } else { - q.push(i); - } - } - - if my_hits > 0 && all_hits.iter().all(|h| h.load(SeqCst) > 0) { - break; - } - } - done.store(true, SeqCst); - }) - .unwrap(); -} - -#[test] -fn destructors() { - #[cfg(miri)] - const THREADS: usize = 2; - #[cfg(not(miri))] - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - #[cfg(miri)] - const STEPS: usize = 100; - #[cfg(not(miri))] - const STEPS: usize = 1000; - - struct Elem(usize, Arc>>); - - impl Drop for Elem { - fn drop(&mut self) { - self.1.lock().unwrap().push(self.0); - } - } - - let q = Injector::new(); - let dropped = Arc::new(Mutex::new(Vec::new())); - let remaining = Arc::new(AtomicUsize::new(COUNT)); - - for i in 0..COUNT { - q.push(Elem(i, dropped.clone())); - } - - scope(|scope| { - for _ in 0..THREADS { - let remaining = remaining.clone(); - let q = &q; - - scope.spawn(move |_| { - let w2 = Worker::new_fifo(); - let mut cnt = 0; - - while cnt < STEPS { - if let Success(_) = q.steal() { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - - let _ = q.steal_batch(&w2); - - if let Success(_) = q.steal_batch_and_pop(&w2) { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - - while w2.pop().is_some() { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - } - }); - } - - for _ in 0..STEPS { - if let Success(_) = q.steal() { - remaining.fetch_sub(1, SeqCst); - } - } - }) - .unwrap(); - - let rem = remaining.load(SeqCst); - assert!(rem > 0); - - { - let mut v = dropped.lock().unwrap(); - assert_eq!(v.len(), COUNT - rem); - v.clear(); - } - - drop(q); - - { - let mut v = dropped.lock().unwrap(); - 
assert_eq!(v.len(), rem); - v.sort_unstable(); - for pair in v.windows(2) { - assert_eq!(pair[0] + 1, pair[1]); - } - } -} - -// If `Block` is created on the stack, the array of slots will multiply this `BigStruct` and -// probably overflow the thread stack. It's now directly created on the heap to avoid this. -#[test] -fn stack_overflow() { - const N: usize = 32_768; - struct BigStruct { - _data: [u8; N], - } - - let q = Injector::new(); - - q.push(BigStruct { _data: [0u8; N] }); - - while !matches!(q.steal(), Empty) {} -} diff --git a/crossbeam-deque/tests/lifo.rs b/crossbeam-deque/tests/lifo.rs deleted file mode 100644 index c1a65cd2e..000000000 --- a/crossbeam-deque/tests/lifo.rs +++ /dev/null @@ -1,359 +0,0 @@ -use std::sync::atomic::Ordering::SeqCst; -use std::sync::atomic::{AtomicBool, AtomicUsize}; -use std::sync::{Arc, Mutex}; - -use crossbeam_deque::Steal::{Empty, Success}; -use crossbeam_deque::Worker; -use crossbeam_utils::thread::scope; -use rand::Rng; - -#[test] -fn smoke() { - let w = Worker::new_lifo(); - let s = w.stealer(); - assert_eq!(w.pop(), None); - assert_eq!(s.steal(), Empty); - - w.push(1); - assert_eq!(w.pop(), Some(1)); - assert_eq!(w.pop(), None); - assert_eq!(s.steal(), Empty); - - w.push(2); - assert_eq!(s.steal(), Success(2)); - assert_eq!(s.steal(), Empty); - assert_eq!(w.pop(), None); - - w.push(3); - w.push(4); - w.push(5); - assert_eq!(s.steal(), Success(3)); - assert_eq!(s.steal(), Success(4)); - assert_eq!(s.steal(), Success(5)); - assert_eq!(s.steal(), Empty); - - w.push(6); - w.push(7); - w.push(8); - w.push(9); - assert_eq!(w.pop(), Some(9)); - assert_eq!(s.steal(), Success(6)); - assert_eq!(w.pop(), Some(8)); - assert_eq!(w.pop(), Some(7)); - assert_eq!(w.pop(), None); -} - -#[test] -fn is_empty() { - let w = Worker::new_lifo(); - let s = w.stealer(); - - assert!(w.is_empty()); - w.push(1); - assert!(!w.is_empty()); - w.push(2); - assert!(!w.is_empty()); - let _ = w.pop(); - assert!(!w.is_empty()); - let _ = w.pop(); - 
assert!(w.is_empty()); - - assert!(s.is_empty()); - w.push(1); - assert!(!s.is_empty()); - w.push(2); - assert!(!s.is_empty()); - let _ = s.steal(); - assert!(!s.is_empty()); - let _ = s.steal(); - assert!(s.is_empty()); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const STEPS: usize = 500; - #[cfg(not(miri))] - const STEPS: usize = 50_000; - - let w = Worker::new_lifo(); - let s = w.stealer(); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..STEPS { - loop { - if let Success(v) = s.steal() { - assert_eq!(i, v); - break; - } - #[cfg(miri)] - std::hint::spin_loop(); - } - } - - assert_eq!(s.steal(), Empty); - }); - - for i in 0..STEPS { - w.push(i); - } - }) - .unwrap(); -} - -#[test] -fn stampede() { - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - - let w = Worker::new_lifo(); - - for i in 0..COUNT { - w.push(Box::new(i + 1)); - } - let remaining = Arc::new(AtomicUsize::new(COUNT)); - - scope(|scope| { - for _ in 0..THREADS { - let s = w.stealer(); - let remaining = remaining.clone(); - - scope.spawn(move |_| { - let mut last = 0; - while remaining.load(SeqCst) > 0 { - if let Success(x) = s.steal() { - assert!(last < *x); - last = *x; - remaining.fetch_sub(1, SeqCst); - } - } - }); - } - - let mut last = COUNT + 1; - while remaining.load(SeqCst) > 0 { - if let Some(x) = w.pop() { - assert!(last > *x); - last = *x; - remaining.fetch_sub(1, SeqCst); - } - } - }) - .unwrap(); -} - -#[test] -fn stress() { - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - - let w = Worker::new_lifo(); - let done = Arc::new(AtomicBool::new(false)); - let hits = Arc::new(AtomicUsize::new(0)); - - scope(|scope| { - for _ in 0..THREADS { - let s = w.stealer(); - let done = done.clone(); - let hits = hits.clone(); - - scope.spawn(move |_| { - let w2 = Worker::new_lifo(); - - while !done.load(SeqCst) { - if let Success(_) = s.steal() { - 
hits.fetch_add(1, SeqCst); - } - - let _ = s.steal_batch(&w2); - - if let Success(_) = s.steal_batch_and_pop(&w2) { - hits.fetch_add(1, SeqCst); - } - - while w2.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - }); - } - - let mut rng = rand::thread_rng(); - let mut expected = 0; - while expected < COUNT { - if rng.gen_range(0..3) == 0 { - while w.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } else { - w.push(expected); - expected += 1; - } - } - - while hits.load(SeqCst) < COUNT { - while w.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - done.store(true, SeqCst); - }) - .unwrap(); -} - -#[cfg_attr(miri, ignore)] // Miri is too slow -#[test] -fn no_starvation() { - const THREADS: usize = 8; - const COUNT: usize = 50_000; - - let w = Worker::new_lifo(); - let done = Arc::new(AtomicBool::new(false)); - let mut all_hits = Vec::new(); - - scope(|scope| { - for _ in 0..THREADS { - let s = w.stealer(); - let done = done.clone(); - let hits = Arc::new(AtomicUsize::new(0)); - all_hits.push(hits.clone()); - - scope.spawn(move |_| { - let w2 = Worker::new_lifo(); - - while !done.load(SeqCst) { - if let Success(_) = s.steal() { - hits.fetch_add(1, SeqCst); - } - - let _ = s.steal_batch(&w2); - - if let Success(_) = s.steal_batch_and_pop(&w2) { - hits.fetch_add(1, SeqCst); - } - - while w2.pop().is_some() { - hits.fetch_add(1, SeqCst); - } - } - }); - } - - let mut rng = rand::thread_rng(); - let mut my_hits = 0; - loop { - for i in 0..rng.gen_range(0..COUNT) { - if rng.gen_range(0..3) == 0 && my_hits == 0 { - while w.pop().is_some() { - my_hits += 1; - } - } else { - w.push(i); - } - } - - if my_hits > 0 && all_hits.iter().all(|h| h.load(SeqCst) > 0) { - break; - } - } - done.store(true, SeqCst); - }) - .unwrap(); -} - -#[test] -fn destructors() { - #[cfg(miri)] - const THREADS: usize = 2; - #[cfg(not(miri))] - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 50_000; - #[cfg(miri)] - const 
STEPS: usize = 100; - #[cfg(not(miri))] - const STEPS: usize = 1000; - - struct Elem(usize, Arc>>); - - impl Drop for Elem { - fn drop(&mut self) { - self.1.lock().unwrap().push(self.0); - } - } - - let w = Worker::new_lifo(); - let dropped = Arc::new(Mutex::new(Vec::new())); - let remaining = Arc::new(AtomicUsize::new(COUNT)); - - for i in 0..COUNT { - w.push(Elem(i, dropped.clone())); - } - - scope(|scope| { - for _ in 0..THREADS { - let remaining = remaining.clone(); - let s = w.stealer(); - - scope.spawn(move |_| { - let w2 = Worker::new_lifo(); - let mut cnt = 0; - - while cnt < STEPS { - if let Success(_) = s.steal() { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - - let _ = s.steal_batch(&w2); - - if let Success(_) = s.steal_batch_and_pop(&w2) { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - - while w2.pop().is_some() { - cnt += 1; - remaining.fetch_sub(1, SeqCst); - } - } - }); - } - - for _ in 0..STEPS { - if w.pop().is_some() { - remaining.fetch_sub(1, SeqCst); - } - } - }) - .unwrap(); - - let rem = remaining.load(SeqCst); - assert!(rem > 0); - - { - let mut v = dropped.lock().unwrap(); - assert_eq!(v.len(), COUNT - rem); - v.clear(); - } - - drop(w); - - { - let mut v = dropped.lock().unwrap(); - assert_eq!(v.len(), rem); - v.sort_unstable(); - for pair in v.windows(2) { - assert_eq!(pair[0] + 1, pair[1]); - } - } -} diff --git a/crossbeam-deque/tests/steal.rs b/crossbeam-deque/tests/steal.rs deleted file mode 100644 index af2499856..000000000 --- a/crossbeam-deque/tests/steal.rs +++ /dev/null @@ -1,212 +0,0 @@ -use crossbeam_deque::Steal::Success; -use crossbeam_deque::{Injector, Worker}; - -#[test] -fn steal_fifo() { - let w = Worker::new_fifo(); - for i in 1..=3 { - w.push(i); - } - - let s = w.stealer(); - assert_eq!(s.steal(), Success(1)); - assert_eq!(s.steal(), Success(2)); - assert_eq!(s.steal(), Success(3)); -} - -#[test] -fn steal_lifo() { - let w = Worker::new_lifo(); - for i in 1..=3 { - w.push(i); - } - - let s = w.stealer(); - 
assert_eq!(s.steal(), Success(1)); - assert_eq!(s.steal(), Success(2)); - assert_eq!(s.steal(), Success(3)); -} - -#[test] -fn steal_injector() { - let q = Injector::new(); - for i in 1..=3 { - q.push(i); - } - - assert_eq!(q.steal(), Success(1)); - assert_eq!(q.steal(), Success(2)); - assert_eq!(q.steal(), Success(3)); -} - -#[test] -fn steal_batch_fifo_fifo() { - let w = Worker::new_fifo(); - for i in 1..=4 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_fifo(); - - assert_eq!(s.steal_batch(&w2), Success(())); - assert_eq!(w2.pop(), Some(1)); - assert_eq!(w2.pop(), Some(2)); -} - -#[test] -fn steal_batch_lifo_lifo() { - let w = Worker::new_lifo(); - for i in 1..=4 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_lifo(); - - assert_eq!(s.steal_batch(&w2), Success(())); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(1)); -} - -#[test] -fn steal_batch_fifo_lifo() { - let w = Worker::new_fifo(); - for i in 1..=4 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_lifo(); - - assert_eq!(s.steal_batch(&w2), Success(())); - assert_eq!(w2.pop(), Some(1)); - assert_eq!(w2.pop(), Some(2)); -} - -#[test] -fn steal_batch_lifo_fifo() { - let w = Worker::new_lifo(); - for i in 1..=4 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_fifo(); - - assert_eq!(s.steal_batch(&w2), Success(())); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(1)); -} - -#[test] -fn steal_batch_injector_fifo() { - let q = Injector::new(); - for i in 1..=4 { - q.push(i); - } - - let w2 = Worker::new_fifo(); - assert_eq!(q.steal_batch(&w2), Success(())); - assert_eq!(w2.pop(), Some(1)); - assert_eq!(w2.pop(), Some(2)); -} - -#[test] -fn steal_batch_injector_lifo() { - let q = Injector::new(); - for i in 1..=4 { - q.push(i); - } - - let w2 = Worker::new_lifo(); - assert_eq!(q.steal_batch(&w2), Success(())); - assert_eq!(w2.pop(), Some(1)); - assert_eq!(w2.pop(), Some(2)); -} - -#[test] -fn 
steal_batch_and_pop_fifo_fifo() { - let w = Worker::new_fifo(); - for i in 1..=6 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_fifo(); - - assert_eq!(s.steal_batch_and_pop(&w2), Success(1)); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(3)); -} - -#[test] -fn steal_batch_and_pop_lifo_lifo() { - let w = Worker::new_lifo(); - for i in 1..=6 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_lifo(); - - assert_eq!(s.steal_batch_and_pop(&w2), Success(3)); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(1)); -} - -#[test] -fn steal_batch_and_pop_fifo_lifo() { - let w = Worker::new_fifo(); - for i in 1..=6 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_lifo(); - - assert_eq!(s.steal_batch_and_pop(&w2), Success(1)); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(3)); -} - -#[test] -fn steal_batch_and_pop_lifo_fifo() { - let w = Worker::new_lifo(); - for i in 1..=6 { - w.push(i); - } - - let s = w.stealer(); - let w2 = Worker::new_fifo(); - - assert_eq!(s.steal_batch_and_pop(&w2), Success(3)); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(1)); -} - -#[test] -fn steal_batch_and_pop_injector_fifo() { - let q = Injector::new(); - for i in 1..=6 { - q.push(i); - } - - let w2 = Worker::new_fifo(); - assert_eq!(q.steal_batch_and_pop(&w2), Success(1)); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(3)); -} - -#[test] -fn steal_batch_and_pop_injector_lifo() { - let q = Injector::new(); - for i in 1..=6 { - q.push(i); - } - - let w2 = Worker::new_lifo(); - assert_eq!(q.steal_batch_and_pop(&w2), Success(1)); - assert_eq!(w2.pop(), Some(2)); - assert_eq!(w2.pop(), Some(3)); -} diff --git a/crossbeam-epoch/CHANGELOG.md b/crossbeam-epoch/CHANGELOG.md deleted file mode 100644 index d5ca3a071..000000000 --- a/crossbeam-epoch/CHANGELOG.md +++ /dev/null @@ -1,204 +0,0 @@ -# Version 0.9.18 - -- Remove dependency on `cfg-if`. 
(#1072) -- Remove dependency on `autocfg`. (#1071) - -# Version 0.9.17 - -- Remove dependency on `memoffset`. (#1058) - -# Version 0.9.16 - -- Bump the minimum supported Rust version to 1.61. (#1037) -- Improve support for targets without atomic CAS. (#1037) -- Remove build script. (#1037) -- Remove dependency on `scopeguard`. (#1045) -- Update `loom` dependency to 0.7. - -# Version 0.9.15 - -- Update `memoffset` to 0.9. (#981) - -# Version 0.9.14 - -- Update `memoffset` to 0.8. (#955) - -# Version 0.9.13 - -- Fix build script bug introduced in 0.9.12. (#932) - -# Version 0.9.12 - -**Note:** This release has been yanked due to regression fixed in 0.9.13. - -- Update `memoffset` to 0.7. (#926) -- Improve support for custom targets. (#922) - -# Version 0.9.11 - -- Removes the dependency on the `once_cell` crate to restore the MSRV. (#913) -- Work around [rust-lang#98302](https://github.com/rust-lang/rust/issues/98302), which causes compile error on windows-gnu when LTO is enabled. (#913) - -# Version 0.9.10 - -- Bump the minimum supported Rust version to 1.38. (#877) -- Mitigate the risk of segmentation faults in buggy downstream implementations. (#879) -- Add `{Atomic, Shared}::try_into_owned` (#701) - -# Version 0.9.9 - -- Replace lazy_static with once_cell. (#817) - -# Version 0.9.8 - -- Make `Atomic::null()` const function at 1.61+. (#797) - -# Version 0.9.7 - -- Fix Miri error when `-Zmiri-check-number-validity` is enabled. (#779) - -# Version 0.9.6 - -- Add `Atomic::fetch_update`. (#706) - -# Version 0.9.5 - -- Fix UB in `Pointable` impl of `[MaybeUninit]`. (#694) -- Support targets that do not have atomic CAS on stable Rust. (#698) -- Fix breakage with nightly feature due to rust-lang/rust#84510. (#692) - -# Version 0.9.4 - -**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. - -- Fix UB in `<[MaybeUninit] as Pointable>::init` when global allocator failed allocation. 
(#690) -- Bump `loom` dependency to version 0.5. (#686) - -# Version 0.9.3 - -**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. - -- Make `loom` dependency optional. (#666) - -# Version 0.9.2 - -**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. - -- Add `Atomic::compare_exchange` and `Atomic::compare_exchange_weak`. (#628) -- Deprecate `Atomic::compare_and_set` and `Atomic::compare_and_set_weak`. Use `Atomic::compare_exchange` or `Atomic::compare_exchange_weak` instead. (#628) -- Make `const_fn` dependency optional. (#611) -- Add unstable support for `loom`. (#487) - -# Version 0.9.1 - -**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. - -- Bump `memoffset` dependency to version 0.6. (#592) - -# Version 0.9.0 - -**Note**: This release has been yanked. See [#693](https://github.com/crossbeam-rs/crossbeam/issues/693) for details. - -- Bump the minimum supported Rust version to 1.36. -- Support dynamically sized types. - -# Version 0.8.2 - -- Fix bug in release (yanking 0.8.1) - -# Version 0.8.1 - -- Bump `autocfg` dependency to version 1.0. (#460) -- Reduce stall in list iteration. (#376) -- Stop stealing from the same deque. (#448) -- Fix unsoundness issues by adopting `MaybeUninit`. (#458) -- Fix use-after-free in lock-free queue. (#466) - -# Version 0.8.0 - -- Bump the minimum required version to 1.28. -- Fix breakage with nightly feature due to rust-lang/rust#65214. -- Make `Atomic::null()` const function at 1.31+. -- Bump `crossbeam-utils` to `0.7`. - -# Version 0.7.2 - -- Add `Atomic::into_owned()`. -- Update `memoffset` dependency. - -# Version 0.7.1 - -- Add `Shared::deref_mut()`. -- Add a Treiber stack to examples. - -# Version 0.7.0 - -- Remove `Guard::clone()`. -- Bump dependencies. - -# Version 0.6.1 - -- Update `crossbeam-utils` to `0.6`. 
- -# Version 0.6.0 - -- `defer` now requires `F: Send + 'static`. -- Bump the minimum Rust version to 1.26. -- Pinning while TLS is tearing down does not fail anymore. -- Rename `Handle` to `LocalHandle`. -- Add `defer_unchecked` and `defer_destroy`. -- Remove `Clone` impl for `LocalHandle`. - -# Version 0.5.2 - -- Update `crossbeam-utils` to `0.5`. - -# Version 0.5.1 - -- Fix compatibility with the latest Rust nightly. - -# Version 0.5.0 - -- Update `crossbeam-utils` to `0.4`. -- Specify the minimum Rust version to `1.25.0`. - -# Version 0.4.3 - -- Downgrade `crossbeam-utils` to `0.3` because it was a breaking change. - -# Version 0.4.2 - -- Expose the `Pointer` trait. -- Warn missing docs and missing debug impls. -- Update `crossbeam-utils` to `0.4`. - -# Version 0.4.1 - -- Add `Debug` impls for `Collector`, `Handle`, and `Guard`. -- Add `load_consume` to `Atomic`. -- Rename `Collector::handle` to `Collector::register`. -- Remove the `Send` implementation for `Handle` (this was a bug). Only - `Collector`s can be shared among multiple threads, while `Handle`s and - `Guard`s must stay within the thread in which they were created. - -# Version 0.4.0 - -- Update dependencies. -- Remove support for Rust 1.13. - -# Version 0.3.0 - -- Add support for Rust 1.13. -- Improve documentation for CAS. - -# Version 0.2.0 - -- Add method `Owned::into_box`. -- Fix a use-after-free bug in `Local::finalize`. -- Fix an ordering bug in `Global::push_bag`. -- Fix a bug in calculating distance between epochs. -- Remove `impl Into> for Owned`. - -# Version 0.1.0 - -- First version of the new epoch-based GC. 
diff --git a/crossbeam-epoch/Cargo.toml b/crossbeam-epoch/Cargo.toml deleted file mode 100644 index 918215fa7..000000000 --- a/crossbeam-epoch/Cargo.toml +++ /dev/null @@ -1,50 +0,0 @@ -[package] -name = "crossbeam-epoch" -# When publishing a new version: -# - Update CHANGELOG.md -# - Update README.md (when increasing major or minor version) -# - Run './tools/publish.sh crossbeam-epoch ' -version = "0.9.18" -edition = "2021" -rust-version = "1.61" -license = "MIT OR Apache-2.0" -repository = "https://github.com/crossbeam-rs/crossbeam" -homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch" -description = "Epoch-based garbage collection" -keywords = ["lock-free", "rcu", "atomic", "garbage"] -categories = ["concurrency", "memory-management", "no-std"] - -[features] -default = ["std"] - -# Enable to use APIs that require `std`. -# This is enabled by default. -std = ["alloc", "crossbeam-utils/std"] - -# Enable to use APIs that require `alloc`. -# This is enabled by default and also enabled if the `std` feature is enabled. -# -# NOTE: Disabling both `std` *and* `alloc` features is not supported yet. -alloc = [] - -# Enable the use of loom for concurrency testing. -# -# NOTE: This feature is outside of the normal semver guarantees and minor or -# patch versions of crossbeam may make breaking changes to them at any time. -loom = ["loom-crate", "crossbeam-utils/loom"] - -[dependencies] -crossbeam-utils = { version = "0.8.18", path = "../crossbeam-utils", default-features = false, features = ["atomic"] } - -# Enable the use of loom for concurrency testing. -# -# NOTE: This feature is outside of the normal semver guarantees and minor or -# patch versions of crossbeam may make breaking changes to them at any time. 
-[target.'cfg(crossbeam_loom)'.dependencies] -loom-crate = { package = "loom", version = "0.7.1", optional = true } - -[dev-dependencies] -rand = "0.8" - -[lints] -workspace = true diff --git a/crossbeam-epoch/LICENSE-APACHE b/crossbeam-epoch/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/crossbeam-epoch/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/crossbeam-epoch/LICENSE-MIT b/crossbeam-epoch/LICENSE-MIT deleted file mode 100644 index 068d491fd..000000000 --- a/crossbeam-epoch/LICENSE-MIT +++ /dev/null @@ -1,27 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 The Crossbeam Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/crossbeam-epoch/README.md b/crossbeam-epoch/README.md deleted file mode 100644 index ba74c7c75..000000000 --- a/crossbeam-epoch/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Crossbeam Epoch - -[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( -https://github.com/crossbeam-rs/crossbeam/actions) -[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch#license) -[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)]( -https://crates.io/crates/crossbeam-epoch) -[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)]( -https://docs.rs/crossbeam-epoch) -[![Rust 1.61+](https://img.shields.io/badge/rust-1.61+-lightgray.svg)]( -https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) - -This crate provides epoch-based garbage collection for building concurrent data structures. - -When a thread removes an object from a concurrent data structure, other threads -may be still using pointers to it at the same time, so it cannot be destroyed -immediately. Epoch-based GC is an efficient mechanism for deferring destruction of -shared objects until no pointers to them can exist. - -Everything in this crate except the global GC can be used in `no_std` environments, provided that -`alloc` feature is enabled. - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -crossbeam-epoch = "0.9" -``` - -## Compatibility - -Crossbeam Epoch supports stable Rust releases going back at least six months, -and every time the minimum supported Rust version is increased, a new minor -version is released. Currently, the minimum supported Rust version is 1.61. 
- -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -#### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff --git a/crossbeam-epoch/benches/defer.rs b/crossbeam-epoch/benches/defer.rs deleted file mode 100644 index 246f90798..000000000 --- a/crossbeam-epoch/benches/defer.rs +++ /dev/null @@ -1,69 +0,0 @@ -#![feature(test)] - -extern crate test; - -use crossbeam_epoch::{self as epoch, Owned}; -use crossbeam_utils::thread::scope; -use test::Bencher; - -#[bench] -fn single_alloc_defer_free(b: &mut Bencher) { - b.iter(|| { - let guard = &epoch::pin(); - let p = Owned::new(1).into_shared(guard); - unsafe { - guard.defer_destroy(p); - } - }); -} - -#[bench] -fn single_defer(b: &mut Bencher) { - b.iter(|| { - let guard = &epoch::pin(); - guard.defer(move || ()); - }); -} - -#[bench] -fn multi_alloc_defer_free(b: &mut Bencher) { - const THREADS: usize = 16; - const STEPS: usize = 10_000; - - b.iter(|| { - scope(|s| { - for _ in 0..THREADS { - s.spawn(|_| { - for _ in 0..STEPS { - let guard = &epoch::pin(); - let p = Owned::new(1).into_shared(guard); - unsafe { - guard.defer_destroy(p); - } - } - }); - } - }) - .unwrap(); - }); -} - -#[bench] -fn multi_defer(b: &mut Bencher) { - const THREADS: usize = 16; - const STEPS: usize = 10_000; - - b.iter(|| { - scope(|s| { - for _ in 0..THREADS { - s.spawn(|_| { - for _ in 0..STEPS { - let guard = &epoch::pin(); - guard.defer(move || ()); - } - }); - } - }) - .unwrap(); - }); -} diff --git a/crossbeam-epoch/benches/flush.rs b/crossbeam-epoch/benches/flush.rs deleted file mode 100644 index 99aab19e1..000000000 --- 
a/crossbeam-epoch/benches/flush.rs +++ /dev/null @@ -1,52 +0,0 @@ -#![feature(test)] - -extern crate test; - -use std::sync::Barrier; - -use crossbeam_epoch as epoch; -use crossbeam_utils::thread::scope; -use test::Bencher; - -#[bench] -fn single_flush(b: &mut Bencher) { - const THREADS: usize = 16; - - let start = Barrier::new(THREADS + 1); - let end = Barrier::new(THREADS + 1); - - scope(|s| { - for _ in 0..THREADS { - s.spawn(|_| { - epoch::pin(); - start.wait(); - end.wait(); - }); - } - - start.wait(); - b.iter(|| epoch::pin().flush()); - end.wait(); - }) - .unwrap(); -} - -#[bench] -fn multi_flush(b: &mut Bencher) { - const THREADS: usize = 16; - const STEPS: usize = 10_000; - - b.iter(|| { - scope(|s| { - for _ in 0..THREADS { - s.spawn(|_| { - for _ in 0..STEPS { - let guard = &epoch::pin(); - guard.flush(); - } - }); - } - }) - .unwrap(); - }); -} diff --git a/crossbeam-epoch/benches/pin.rs b/crossbeam-epoch/benches/pin.rs deleted file mode 100644 index 8bf87e9b7..000000000 --- a/crossbeam-epoch/benches/pin.rs +++ /dev/null @@ -1,31 +0,0 @@ -#![feature(test)] - -extern crate test; - -use crossbeam_epoch as epoch; -use crossbeam_utils::thread::scope; -use test::Bencher; - -#[bench] -fn single_pin(b: &mut Bencher) { - b.iter(epoch::pin); -} - -#[bench] -fn multi_pin(b: &mut Bencher) { - const THREADS: usize = 16; - const STEPS: usize = 100_000; - - b.iter(|| { - scope(|s| { - for _ in 0..THREADS { - s.spawn(|_| { - for _ in 0..STEPS { - epoch::pin(); - } - }); - } - }) - .unwrap(); - }); -} diff --git a/crossbeam-epoch/examples/sanitize.rs b/crossbeam-epoch/examples/sanitize.rs deleted file mode 100644 index 4109c34a8..000000000 --- a/crossbeam-epoch/examples/sanitize.rs +++ /dev/null @@ -1,66 +0,0 @@ -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed}; -use std::sync::Arc; -use std::thread; -use std::time::{Duration, Instant}; - -use crossbeam_epoch::{self as epoch, Atomic, Collector, LocalHandle, Owned, 
Shared}; -use rand::Rng; - -fn worker(a: Arc>, handle: LocalHandle) -> usize { - let mut rng = rand::thread_rng(); - let mut sum = 0; - - if rng.gen() { - thread::sleep(Duration::from_millis(1)); - } - let timeout = Duration::from_millis(rng.gen_range(0..10)); - let now = Instant::now(); - - while now.elapsed() < timeout { - for _ in 0..100 { - let guard = &handle.pin(); - guard.flush(); - - let val = if rng.gen() { - let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard); - unsafe { - guard.defer_destroy(p); - guard.flush(); - p.deref().load(Relaxed) - } - } else { - let p = a.load(Acquire, guard); - unsafe { p.deref().fetch_add(sum, Relaxed) } - }; - - sum = sum.wrapping_add(val); - } - } - - sum -} - -fn main() { - for _ in 0..100 { - let collector = Collector::new(); - let a = Arc::new(Atomic::new(AtomicUsize::new(777))); - - let threads = (0..16) - .map(|_| { - let a = a.clone(); - let c = collector.clone(); - thread::spawn(move || worker(a, c.register())) - }) - .collect::>(); - - for t in threads { - t.join().unwrap(); - } - - unsafe { - a.swap(Shared::null(), AcqRel, epoch::unprotected()) - .into_owned(); - } - } -} diff --git a/crossbeam-epoch/src/atomic.rs b/crossbeam-epoch/src/atomic.rs deleted file mode 100644 index b31926dc1..000000000 --- a/crossbeam-epoch/src/atomic.rs +++ /dev/null @@ -1,1609 +0,0 @@ -use alloc::boxed::Box; -use core::alloc::Layout; -use core::borrow::{Borrow, BorrowMut}; -use core::cmp; -use core::fmt; -use core::marker::PhantomData; -use core::mem::{self, MaybeUninit}; -use core::ops::{Deref, DerefMut}; -use core::ptr; -use core::slice; - -use crate::guard::Guard; -#[cfg(not(miri))] -use crate::primitive::sync::atomic::AtomicUsize; -use crate::primitive::sync::atomic::{AtomicPtr, Ordering}; -use crossbeam_utils::atomic::AtomicConsume; - -/// Given ordering for the success case in a compare-exchange operation, returns the strongest -/// appropriate ordering for the failure case. 
-#[cfg(miri)] -#[inline] -fn strongest_failure_ordering(order: Ordering) -> Ordering { - use Ordering::*; - match order { - Relaxed | Release => Relaxed, - Acquire | AcqRel => Acquire, - _ => SeqCst, - } -} - -/// The error returned on failed compare-and-swap operation. -pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer> { - /// The value in the atomic pointer at the time of the failed operation. - pub current: Shared<'g, T>, - - /// The new value, which the operation failed to store. - pub new: P, -} - -impl + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CompareExchangeError") - .field("current", &self.current) - .field("new", &self.new) - .finish() - } -} - -/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`. -#[inline] -fn low_bits() -> usize { - (1 << T::ALIGN.trailing_zeros()) - 1 -} - -/// Panics if the pointer is not properly unaligned. -#[inline] -fn ensure_aligned(raw: *mut ()) { - assert_eq!(raw as usize & low_bits::(), 0, "unaligned pointer"); -} - -/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`. -/// -/// `tag` is truncated to fit into the unused bits of the pointer to `T`. -#[inline] -fn compose_tag(ptr: *mut (), tag: usize) -> *mut () { - int_to_ptr_with_provenance( - (ptr as usize & !low_bits::()) | (tag & low_bits::()), - ptr, - ) -} - -/// Decomposes a tagged pointer `data` into the pointer and the tag. 
-#[inline] -fn decompose_tag(ptr: *mut ()) -> (*mut (), usize) { - ( - int_to_ptr_with_provenance(ptr as usize & !low_bits::(), ptr), - ptr as usize & low_bits::(), - ) -} - -// HACK: https://github.com/rust-lang/miri/issues/1866#issuecomment-985802751 -#[inline] -fn int_to_ptr_with_provenance(addr: usize, prov: *mut T) -> *mut T { - let ptr = prov.cast::(); - ptr.wrapping_add(addr.wrapping_sub(ptr as usize)).cast() -} - -/// Types that are pointed to by a single word. -/// -/// In concurrent programming, it is necessary to represent an object within a word because atomic -/// operations (e.g., reads, writes, read-modify-writes) support only single words. This trait -/// qualifies such types that are pointed to by a single word. -/// -/// The trait generalizes `Box` for a sized type `T`. In a box, an object of type `T` is -/// allocated in heap and it is owned by a single-word pointer. This trait is also implemented for -/// `[MaybeUninit]` by storing its size along with its elements and pointing to the pair of array -/// size and elements. -/// -/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`]. In -/// particular, Crossbeam supports dynamically sized slices as follows. -/// -/// ``` -/// use std::mem::MaybeUninit; -/// use crossbeam_epoch::Owned; -/// -/// let o = Owned::<[MaybeUninit]>::init(10); // allocating [i32; 10] -/// ``` -pub trait Pointable { - /// The alignment of pointer. - const ALIGN: usize; - - /// The type for initializers. - type Init; - - /// Initializes a with the given initializer. - /// - /// # Safety - /// - /// The result should be a multiple of `ALIGN`. - unsafe fn init(init: Self::Init) -> *mut (); - - /// Dereferences the given pointer. - /// - /// # Safety - /// - /// - The given `ptr` should have been initialized with [`Pointable::init`]. - /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. - /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently. 
- unsafe fn deref<'a>(ptr: *mut ()) -> &'a Self; - - /// Mutably dereferences the given pointer. - /// - /// # Safety - /// - /// - The given `ptr` should have been initialized with [`Pointable::init`]. - /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. - /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] - /// concurrently. - unsafe fn deref_mut<'a>(ptr: *mut ()) -> &'a mut Self; - - /// Drops the object pointed to by the given pointer. - /// - /// # Safety - /// - /// - The given `ptr` should have been initialized with [`Pointable::init`]. - /// - `ptr` should not have yet been dropped by [`Pointable::drop`]. - /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`] - /// concurrently. - unsafe fn drop(ptr: *mut ()); -} - -impl Pointable for T { - const ALIGN: usize = mem::align_of::(); - - type Init = T; - - unsafe fn init(init: Self::Init) -> *mut () { - Box::into_raw(Box::new(init)).cast::<()>() - } - - unsafe fn deref<'a>(ptr: *mut ()) -> &'a Self { - unsafe { &*(ptr as *const T) } - } - - unsafe fn deref_mut<'a>(ptr: *mut ()) -> &'a mut Self { - unsafe { &mut *ptr.cast::() } - } - - unsafe fn drop(ptr: *mut ()) { - drop(unsafe { Box::from_raw(ptr.cast::()) }); - } -} - -/// Array with size. -/// -/// # Memory layout -/// -/// An array consisting of size and elements: -/// -/// ```text -/// elements -/// | -/// | -/// ------------------------------------ -/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 | -/// ------------------------------------ -/// ``` -/// -/// Its memory layout is different from that of `Box<[T]>` in that size is in the allocation (not -/// along with pointer as in `Box<[T]>`). -/// -/// Elements are not present in the type, but they will be in the allocation. -/// ``` -#[repr(C)] -struct Array { - /// The number of elements (not the number of bytes). 
- len: usize, - elements: [MaybeUninit; 0], -} - -impl Array { - fn layout(len: usize) -> Layout { - Layout::new::() - .extend(Layout::array::>(len).unwrap()) - .unwrap() - .0 - .pad_to_align() - } -} - -impl Pointable for [MaybeUninit] { - const ALIGN: usize = mem::align_of::>(); - - type Init = usize; - - unsafe fn init(len: Self::Init) -> *mut () { - let layout = Array::::layout(len); - unsafe { - let ptr = alloc::alloc::alloc(layout).cast::>(); - if ptr.is_null() { - alloc::alloc::handle_alloc_error(layout); - } - ptr::addr_of_mut!((*ptr).len).write(len); - ptr.cast::<()>() - } - } - - unsafe fn deref<'a>(ptr: *mut ()) -> &'a Self { - unsafe { - let array = &*(ptr as *const Array); - slice::from_raw_parts(array.elements.as_ptr(), array.len) - } - } - - unsafe fn deref_mut<'a>(ptr: *mut ()) -> &'a mut Self { - unsafe { - let array = &mut *ptr.cast::>(); - slice::from_raw_parts_mut(array.elements.as_mut_ptr(), array.len) - } - } - - unsafe fn drop(ptr: *mut ()) { - unsafe { - let len = (*ptr.cast::>()).len; - let layout = Array::::layout(len); - alloc::alloc::dealloc(ptr.cast::(), layout); - } - } -} - -/// An atomic pointer that can be safely shared between threads. -/// -/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused -/// least significant bits of the address. For example, the tag for a pointer to a sized type `T` -/// should be less than `(1 << mem::align_of::().trailing_zeros())`. -/// -/// Any method that loads the pointer must be passed a reference to a [`Guard`]. -/// -/// Crossbeam supports dynamically sized types. See [`Pointable`] for details. -pub struct Atomic { - data: AtomicPtr<()>, - _marker: PhantomData<*mut T>, -} - -unsafe impl Send for Atomic {} -unsafe impl Sync for Atomic {} - -impl Atomic { - /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Atomic; - /// - /// let a = Atomic::new(1234); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn new(init: T) -> Self { - Self::init(init) - } -} - -impl Atomic { - /// Allocates `value` on the heap and returns a new atomic pointer pointing to it. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Atomic; - /// - /// let a = Atomic::::init(1234); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn init(init: T::Init) -> Self { - Self::from(Owned::init(init)) - } - - /// Returns a new atomic pointer pointing to the tagged pointer `data`. - fn from_ptr(data: *mut ()) -> Self { - Self { - data: AtomicPtr::new(data), - _marker: PhantomData, - } - } - - /// Returns a new null atomic pointer. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Atomic; - /// - /// let a = Atomic::::null(); - /// ``` - #[cfg(not(crossbeam_loom))] - pub const fn null() -> Self { - Self { - data: AtomicPtr::new(ptr::null_mut()), - _marker: PhantomData, - } - } - /// Returns a new null atomic pointer. - #[cfg(crossbeam_loom)] - pub fn null() -> Self { - Self { - data: AtomicPtr::new(ptr::null_mut()), - _marker: PhantomData, - } - } - - /// Loads a `Shared` from the atomic pointer. - /// - /// This method takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// let guard = &epoch::pin(); - /// let p = a.load(SeqCst, guard); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn load<'g>(&self, order: Ordering, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_ptr(self.data.load(order)) } - } - - /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering. 
- /// - /// This is similar to the "acquire" ordering, except that an ordering is - /// only guaranteed with operations that "depend on" the result of the load. - /// However consume loads are usually much faster than acquire loads on - /// architectures with a weak memory model since they don't require memory - /// fence instructions. - /// - /// The exact definition of "depend on" is a bit vague, but it works as you - /// would expect in practice since a lot of software, especially the Linux - /// kernel, rely on this behavior. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// - /// let a = Atomic::new(1234); - /// let guard = &epoch::pin(); - /// let p = a.load_consume(guard); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_ptr(self.data.load_consume()) } - } - - /// Stores a `Shared` or `Owned` pointer into the atomic pointer. - /// - /// This method takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{Atomic, Owned, Shared}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// # unsafe { drop(a.load(SeqCst, &crossbeam_epoch::pin()).into_owned()); } // avoid leak - /// a.store(Shared::null(), SeqCst); - /// a.store(Owned::new(1234), SeqCst); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn store>(&self, new: P, order: Ordering) { - self.data.store(new.into_ptr(), order); - } - - /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous - /// `Shared`. - /// - /// This method takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// let guard = &epoch::pin(); - /// let p = a.swap(Shared::null(), SeqCst, guard); - /// # unsafe { drop(p.into_owned()); } // avoid leak - /// ``` - pub fn swap<'g, P: Pointer>(&self, new: P, order: Ordering, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_ptr(self.data.swap(new.into_ptr(), order)) } - } - - /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current - /// value is the same as `current`. The tag is also taken into account, so two pointers to the - /// same object, but with different tags, will not be considered equal. - /// - /// The return value is a result indicating whether the new pointer was written. On success the - /// pointer that was written is returned. On failure the actual current value and `new` are - /// returned. - /// - /// This method takes two `Ordering` arguments to describe the memory - /// ordering of this operation. `success` describes the required ordering for the - /// read-modify-write operation that takes place if the comparison with `current` succeeds. - /// `failure` describes the required ordering for the load operation that takes place when - /// the comparison fails. Using `Acquire` as success ordering makes the store part - /// of this operation `Relaxed`, and using `Release` makes the successful load - /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` - /// and must be equivalent to or weaker than the success ordering. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// - /// let guard = &epoch::pin(); - /// let curr = a.load(SeqCst, guard); - /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard); - /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard); - /// # unsafe { drop(curr.into_owned()); } // avoid leak - /// ``` - pub fn compare_exchange<'g, P>( - &self, - current: Shared<'_, T>, - new: P, - success: Ordering, - failure: Ordering, - _: &'g Guard, - ) -> Result, CompareExchangeError<'g, T, P>> - where - P: Pointer, - { - let new = new.into_ptr(); - self.data - .compare_exchange(current.into_ptr(), new, success, failure) - .map(|_| unsafe { Shared::from_ptr(new) }) - .map_err(|current| unsafe { - CompareExchangeError { - current: Shared::from_ptr(current), - new: P::from_ptr(new), - } - }) - } - - /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current - /// value is the same as `current`. The tag is also taken into account, so two pointers to the - /// same object, but with different tags, will not be considered equal. - /// - /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison - /// succeeds, which can result in more efficient code on some platforms. The return value is a - /// result indicating whether the new pointer was written. On success the pointer that was - /// written is returned. On failure the actual current value and `new` are returned. - /// - /// This method takes two `Ordering` arguments to describe the memory - /// ordering of this operation. `success` describes the required ordering for the - /// read-modify-write operation that takes place if the comparison with `current` succeeds. 
- /// `failure` describes the required ordering for the load operation that takes place when - /// the comparison fails. Using `Acquire` as success ordering makes the store part - /// of this operation `Relaxed`, and using `Release` makes the successful load - /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` - /// and must be equivalent to or weaker than the success ordering. - /// - /// [`compare_exchange`]: Atomic::compare_exchange - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// let guard = &epoch::pin(); - /// - /// let mut new = Owned::new(5678); - /// let mut ptr = a.load(SeqCst, guard); - /// # unsafe { drop(a.load(SeqCst, guard).into_owned()); } // avoid leak - /// loop { - /// match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) { - /// Ok(p) => { - /// ptr = p; - /// break; - /// } - /// Err(err) => { - /// ptr = err.current; - /// new = err.new; - /// } - /// } - /// } - /// - /// let mut curr = a.load(SeqCst, guard); - /// loop { - /// match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) { - /// Ok(_) => break, - /// Err(err) => curr = err.current, - /// } - /// } - /// # unsafe { drop(curr.into_owned()); } // avoid leak - /// ``` - pub fn compare_exchange_weak<'g, P>( - &self, - current: Shared<'_, T>, - new: P, - success: Ordering, - failure: Ordering, - _: &'g Guard, - ) -> Result, CompareExchangeError<'g, T, P>> - where - P: Pointer, - { - let new = new.into_ptr(); - self.data - .compare_exchange_weak(current.into_ptr(), new, success, failure) - .map(|_| unsafe { Shared::from_ptr(new) }) - .map_err(|current| unsafe { - CompareExchangeError { - current: Shared::from_ptr(current), - new: P::from_ptr(new), - } - }) - } - - /// Fetches the pointer, and then applies a function to it that returns a new value. 
- /// Returns a `Result` of `Ok(previous_value)` if the function returned `Some`, else `Err(_)`. - /// - /// Note that the given function may be called multiple times if the value has been changed by - /// other threads in the meantime, as long as the function returns `Some(_)`, but the function - /// will have been applied only once to the stored value. - /// - /// `fetch_update` takes two [`Ordering`] arguments to describe the memory - /// ordering of this operation. The first describes the required ordering for - /// when the operation finally succeeds while the second describes the - /// required ordering for loads. These correspond to the success and failure - /// orderings of [`Atomic::compare_exchange`] respectively. - /// - /// Using [`Acquire`] as success ordering makes the store part of this - /// operation [`Relaxed`], and using [`Release`] makes the final successful - /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], - /// [`Acquire`] or [`Relaxed`] and must be equivalent to or weaker than the - /// success ordering. 
- /// - /// [`Relaxed`]: Ordering::Relaxed - /// [`Acquire`]: Ordering::Acquire - /// [`Release`]: Ordering::Release - /// [`SeqCst`]: Ordering::SeqCst - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// let guard = &epoch::pin(); - /// - /// let res1 = a.fetch_update(SeqCst, SeqCst, guard, |x| Some(x.with_tag(1))); - /// assert!(res1.is_ok()); - /// - /// let res2 = a.fetch_update(SeqCst, SeqCst, guard, |x| None); - /// assert!(res2.is_err()); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn fetch_update<'g, F>( - &self, - set_order: Ordering, - fail_order: Ordering, - guard: &'g Guard, - mut func: F, - ) -> Result, Shared<'g, T>> - where - F: FnMut(Shared<'g, T>) -> Option>, - { - let mut prev = self.load(fail_order, guard); - while let Some(next) = func(prev) { - match self.compare_exchange_weak(prev, next, set_order, fail_order, guard) { - Ok(shared) => return Ok(shared), - Err(next_prev) => prev = next_prev.current, - } - } - Err(prev) - } - - /// Bitwise "and" with the current tag. - /// - /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the - /// new tag to the result. Returns the previous pointer. - /// - /// This method takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::::from(Shared::null().with_tag(3)); - /// let guard = &epoch::pin(); - /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3); - /// assert_eq!(a.load(SeqCst, guard).tag(), 2); - /// ``` - pub fn fetch_and<'g>(&self, val: usize, order: Ordering, _: &'g Guard) -> Shared<'g, T> { - // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance - // compatible, but it is unstable. So, for now emulate it only on cfg(miri). - // Code using AtomicUsize::fetch_* via casts is still permissive-provenance - // compatible and is sound. - // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized, - // use AtomicPtr::fetch_* in all cases from the version in which it is stabilized. - #[cfg(miri)] - unsafe { - let val = val | !low_bits::(); - let fetch_order = strongest_failure_ordering(order); - Shared::from_ptr( - self.data - .fetch_update(order, fetch_order, |x| { - Some(int_to_ptr_with_provenance(x as usize & val, x)) - }) - .unwrap(), - ) - } - #[cfg(not(miri))] - unsafe { - Shared::from_ptr( - (*(&self.data as *const AtomicPtr<_> as *const AtomicUsize)) - .fetch_and(val | !low_bits::(), order) as *mut (), - ) - } - } - - /// Bitwise "or" with the current tag. - /// - /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the - /// new tag to the result. Returns the previous pointer. - /// - /// This method takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::::from(Shared::null().with_tag(1)); - /// let guard = &epoch::pin(); - /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1); - /// assert_eq!(a.load(SeqCst, guard).tag(), 3); - /// ``` - pub fn fetch_or<'g>(&self, val: usize, order: Ordering, _: &'g Guard) -> Shared<'g, T> { - // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance - // compatible, but it is unstable. So, for now emulate it only on cfg(miri). - // Code using AtomicUsize::fetch_* via casts is still permissive-provenance - // compatible and is sound. - // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized, - // use AtomicPtr::fetch_* in all cases from the version in which it is stabilized. - #[cfg(miri)] - unsafe { - let val = val & low_bits::(); - let fetch_order = strongest_failure_ordering(order); - Shared::from_ptr( - self.data - .fetch_update(order, fetch_order, |x| { - Some(int_to_ptr_with_provenance(x as usize | val, x)) - }) - .unwrap(), - ) - } - #[cfg(not(miri))] - unsafe { - Shared::from_ptr( - (*(&self.data as *const AtomicPtr<_> as *const AtomicUsize)) - .fetch_or(val & low_bits::(), order) as *mut (), - ) - } - } - - /// Bitwise "xor" with the current tag. - /// - /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the - /// new tag to the result. Returns the previous pointer. - /// - /// This method takes an [`Ordering`] argument which describes the memory ordering of this - /// operation. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Shared}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::::from(Shared::null().with_tag(1)); - /// let guard = &epoch::pin(); - /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1); - /// assert_eq!(a.load(SeqCst, guard).tag(), 2); - /// ``` - pub fn fetch_xor<'g>(&self, val: usize, order: Ordering, _: &'g Guard) -> Shared<'g, T> { - // Ideally, we would always use AtomicPtr::fetch_* since it is strict-provenance - // compatible, but it is unstable. So, for now emulate it only on cfg(miri). - // Code using AtomicUsize::fetch_* via casts is still permissive-provenance - // compatible and is sound. - // TODO: Once `#![feature(strict_provenance_atomic_ptr)]` is stabilized, - // use AtomicPtr::fetch_* in all cases from the version in which it is stabilized. - #[cfg(miri)] - unsafe { - let val = val & low_bits::(); - let fetch_order = strongest_failure_ordering(order); - Shared::from_ptr( - self.data - .fetch_update(order, fetch_order, |x| { - Some(int_to_ptr_with_provenance(x as usize ^ val, x)) - }) - .unwrap(), - ) - } - #[cfg(not(miri))] - unsafe { - Shared::from_ptr( - (*(&self.data as *const AtomicPtr<_> as *const AtomicUsize)) - .fetch_xor(val & low_bits::(), order) as *mut (), - ) - } - } - - /// Takes ownership of the pointee. - /// - /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a - /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for - /// destructors of data structures. - /// - /// # Panics - /// - /// Panics if this pointer is null, but only in debug mode. - /// - /// # Safety - /// - /// This method may be called only if the pointer is valid and nobody else is holding a - /// reference to the same object. 
- /// - /// # Examples - /// - /// ```rust - /// # use std::mem; - /// # use crossbeam_epoch::Atomic; - /// struct DataStructure { - /// ptr: Atomic, - /// } - /// - /// impl Drop for DataStructure { - /// fn drop(&mut self) { - /// // By now the DataStructure lives only in our thread and we are sure we don't hold - /// // any Shared or & to it ourselves. - /// unsafe { - /// drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned()); - /// } - /// } - /// } - /// ``` - pub unsafe fn into_owned(self) -> Owned { - unsafe { Owned::from_ptr(self.data.into_inner()) } - } - - /// Takes ownership of the pointee if it is non-null. - /// - /// This consumes the atomic and converts it into [`Owned`]. As [`Atomic`] doesn't have a - /// destructor and doesn't drop the pointee while [`Owned`] does, this is suitable for - /// destructors of data structures. - /// - /// # Safety - /// - /// This method may be called only if the pointer is valid and nobody else is holding a - /// reference to the same object, or the pointer is null. - /// - /// # Examples - /// - /// ```rust - /// # use std::mem; - /// # use crossbeam_epoch::Atomic; - /// struct DataStructure { - /// ptr: Atomic, - /// } - /// - /// impl Drop for DataStructure { - /// fn drop(&mut self) { - /// // By now the DataStructure lives only in our thread and we are sure we don't hold - /// // any Shared or & to it ourselves, but it may be null, so we have to be careful. 
- /// let old = mem::replace(&mut self.ptr, Atomic::null()); - /// unsafe { - /// if let Some(x) = old.try_into_owned() { - /// drop(x) - /// } - /// } - /// } - /// } - /// ``` - pub unsafe fn try_into_owned(self) -> Option> { - let data = self.data.into_inner(); - if decompose_tag::(data).0.is_null() { - None - } else { - Some(unsafe { Owned::from_ptr(data) }) - } - } -} - -impl fmt::Debug for Atomic { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let data = self.data.load(Ordering::SeqCst); - let (raw, tag) = decompose_tag::(data); - - f.debug_struct("Atomic") - .field("raw", &raw) - .field("tag", &tag) - .finish() - } -} - -impl fmt::Pointer for Atomic { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let data = self.data.load(Ordering::SeqCst); - let (raw, _) = decompose_tag::(data); - fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f) - } -} - -impl Clone for Atomic { - /// Returns a copy of the atomic value. - /// - /// Note that a `Relaxed` load is used here. If you need synchronization, use it with other - /// atomics or fences. - fn clone(&self) -> Self { - let data = self.data.load(Ordering::Relaxed); - Self::from_ptr(data) - } -} - -impl Default for Atomic { - fn default() -> Self { - Self::null() - } -} - -impl From> for Atomic { - /// Returns a new atomic pointer pointing to `owned`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{Atomic, Owned}; - /// - /// let a = Atomic::::from(Owned::new(1234)); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - fn from(owned: Owned) -> Self { - let data = owned.data; - mem::forget(owned); - Self::from_ptr(data) - } -} - -impl From> for Atomic { - fn from(b: Box) -> Self { - Self::from(Owned::from(b)) - } -} - -impl From for Atomic { - fn from(t: T) -> Self { - Self::new(t) - } -} - -impl<'g, T: ?Sized + Pointable> From> for Atomic { - /// Returns a new atomic pointer pointing to `ptr`. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{Atomic, Shared}; - /// - /// let a = Atomic::::from(Shared::::null()); - /// ``` - fn from(ptr: Shared<'g, T>) -> Self { - Self::from_ptr(ptr.data) - } -} - -impl From<*const T> for Atomic { - /// Returns a new atomic pointer pointing to `raw`. - /// - /// # Examples - /// - /// ``` - /// use std::ptr; - /// use crossbeam_epoch::Atomic; - /// - /// let a = Atomic::::from(ptr::null::()); - /// ``` - fn from(raw: *const T) -> Self { - Self::from_ptr(raw as *mut ()) - } -} - -/// A trait for either `Owned` or `Shared` pointers. -/// -/// This trait is sealed and cannot be implemented for types outside of `crossbeam-epoch`. -pub trait Pointer: crate::sealed::Sealed { - /// Returns the machine representation of the pointer. - fn into_ptr(self) -> *mut (); - - /// Returns a new pointer pointing to the tagged pointer `data`. - /// - /// # Safety - /// - /// The given `data` should have been created by `Pointer::into_ptr()`, and one `data` should - /// not be converted back by `Pointer::from_ptr()` multiple times. - unsafe fn from_ptr(data: *mut ()) -> Self; -} - -/// An owned heap-allocated object. -/// -/// This type is very similar to `Box`. -/// -/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused -/// least significant bits of the address. -pub struct Owned { - data: *mut (), - _marker: PhantomData>, -} - -impl crate::sealed::Sealed for Owned {} -impl Pointer for Owned { - #[inline] - fn into_ptr(self) -> *mut () { - let data = self.data; - mem::forget(self); - data - } - - /// Returns a new pointer pointing to the tagged pointer `data`. - /// - /// # Panics - /// - /// Panics if the pointer is null, but only in debug mode. 
- #[inline] - unsafe fn from_ptr(data: *mut ()) -> Self { - debug_assert!(!data.is_null(), "converting null into `Owned`"); - Self { - data, - _marker: PhantomData, - } - } -} - -impl Owned { - /// Returns a new owned pointer pointing to `raw`. - /// - /// This function is unsafe because improper use may lead to memory problems. Argument `raw` - /// must be a valid pointer. Also, a double-free may occur if the function is called twice on - /// the same raw pointer. - /// - /// # Panics - /// - /// Panics if `raw` is not properly aligned. - /// - /// # Safety - /// - /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted - /// back by `Owned::from_raw()` multiple times. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Owned; - /// - /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; - /// ``` - pub unsafe fn from_raw(raw: *mut T) -> Self { - let raw = raw.cast::<()>(); - ensure_aligned::(raw); - unsafe { Self::from_ptr(raw) } - } - - /// Converts the owned pointer into a `Box`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Owned; - /// - /// let o = Owned::new(1234); - /// let b: Box = o.into_box(); - /// assert_eq!(*b, 1234); - /// ``` - pub fn into_box(self) -> Box { - let (raw, _) = decompose_tag::(self.data); - mem::forget(self); - unsafe { Box::from_raw(raw.cast::()) } - } - - /// Allocates `value` on the heap and returns a new owned pointer pointing to it. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Owned; - /// - /// let o = Owned::new(1234); - /// ``` - pub fn new(init: T) -> Self { - Self::init(init) - } -} - -impl Owned { - /// Allocates `value` on the heap and returns a new owned pointer pointing to it. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Owned; - /// - /// let o = Owned::::init(1234); - /// ``` - pub fn init(init: T::Init) -> Self { - unsafe { Self::from_ptr(T::init(init)) } - } - - /// Converts the owned pointer into a [`Shared`]. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Owned}; - /// - /// let o = Owned::new(1234); - /// let guard = &epoch::pin(); - /// let p = o.into_shared(guard); - /// # unsafe { drop(p.into_owned()); } // avoid leak - /// ``` - #[allow(clippy::needless_lifetimes)] - pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> { - unsafe { Shared::from_ptr(self.into_ptr()) } - } - - /// Returns the tag stored within the pointer. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Owned; - /// - /// assert_eq!(Owned::new(1234).tag(), 0); - /// ``` - pub fn tag(&self) -> usize { - let (_, tag) = decompose_tag::(self.data); - tag - } - - /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the - /// unused bits of the pointer to `T`. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Owned; - /// - /// let o = Owned::new(0u64); - /// assert_eq!(o.tag(), 0); - /// let o = o.with_tag(2); - /// assert_eq!(o.tag(), 2); - /// ``` - pub fn with_tag(self, tag: usize) -> Self { - let data = self.into_ptr(); - unsafe { Self::from_ptr(compose_tag::(data, tag)) } - } -} - -impl Drop for Owned { - fn drop(&mut self) { - let (raw, _) = decompose_tag::(self.data); - unsafe { - T::drop(raw); - } - } -} - -impl fmt::Debug for Owned { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let (raw, tag) = decompose_tag::(self.data); - - f.debug_struct("Owned") - .field("raw", &raw) - .field("tag", &tag) - .finish() - } -} - -impl Clone for Owned { - fn clone(&self) -> Self { - Self::new((**self).clone()).with_tag(self.tag()) - } -} - -impl Deref for Owned { - type Target = T; - - fn deref(&self) -> &T { - let (raw, _) = decompose_tag::(self.data); - unsafe { T::deref(raw) } - } -} - -impl DerefMut for Owned { - fn deref_mut(&mut self) -> &mut T { - let (raw, _) = decompose_tag::(self.data); - unsafe { T::deref_mut(raw) } - } -} - -impl From for Owned { - fn from(t: T) -> Self { - Self::new(t) - } -} - -impl From> for Owned { - /// Returns a new owned pointer pointing to `b`. - /// - /// # Panics - /// - /// Panics if the pointer (the `Box`) is not properly aligned. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Owned; - /// - /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) }; - /// ``` - fn from(b: Box) -> Self { - unsafe { Self::from_raw(Box::into_raw(b)) } - } -} - -impl Borrow for Owned { - fn borrow(&self) -> &T { - self.deref() - } -} - -impl BorrowMut for Owned { - fn borrow_mut(&mut self) -> &mut T { - self.deref_mut() - } -} - -impl AsRef for Owned { - fn as_ref(&self) -> &T { - self.deref() - } -} - -impl AsMut for Owned { - fn as_mut(&mut self) -> &mut T { - self.deref_mut() - } -} - -/// A pointer to an object protected by the epoch GC. 
-/// -/// The pointer is valid for use only during the lifetime `'g`. -/// -/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused -/// least significant bits of the address. -pub struct Shared<'g, T: 'g + ?Sized + Pointable> { - data: *mut (), - _marker: PhantomData<(&'g (), *const T)>, -} - -impl Clone for Shared<'_, T> { - fn clone(&self) -> Self { - *self - } -} - -impl Copy for Shared<'_, T> {} - -impl crate::sealed::Sealed for Shared<'_, T> {} -impl Pointer for Shared<'_, T> { - #[inline] - fn into_ptr(self) -> *mut () { - self.data - } - - #[inline] - unsafe fn from_ptr(data: *mut ()) -> Self { - Shared { - data, - _marker: PhantomData, - } - } -} - -impl Shared<'_, T> { - /// Converts the pointer to a raw pointer (without the tag). - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let o = Owned::new(1234); - /// let raw = &*o as *const _; - /// let a = Atomic::from(o); - /// - /// let guard = &epoch::pin(); - /// let p = a.load(SeqCst, guard); - /// assert_eq!(p.as_raw(), raw); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn as_raw(&self) -> *const T { - let (raw, _) = decompose_tag::(self.data); - raw as *const _ - } -} - -impl<'g, T: ?Sized + Pointable> Shared<'g, T> { - /// Returns a new null pointer. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Shared; - /// - /// let p = Shared::::null(); - /// assert!(p.is_null()); - /// ``` - pub fn null() -> Self { - Self { - data: ptr::null_mut(), - _marker: PhantomData, - } - } - - /// Returns `true` if the pointer is null. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::null(); - /// let guard = &epoch::pin(); - /// assert!(a.load(SeqCst, guard).is_null()); - /// a.store(Owned::new(1234), SeqCst); - /// assert!(!a.load(SeqCst, guard).is_null()); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn is_null(&self) -> bool { - let (raw, _) = decompose_tag::(self.data); - raw.is_null() - } - - /// Dereferences the pointer. - /// - /// Returns a reference to the pointee that is valid during the lifetime `'g`. - /// - /// # Safety - /// - /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. - /// - /// Another concern is the possibility of data races due to lack of proper synchronization. - /// For example, consider the following scenario: - /// - /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` - /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` - /// - /// The problem is that relaxed orderings don't synchronize initialization of the object with - /// the read from the second thread. This is a data race. A possible solution would be to use - /// `Release` and `Acquire` orderings. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// let guard = &epoch::pin(); - /// let p = a.load(SeqCst, guard); - /// unsafe { - /// assert_eq!(p.deref(), &1234); - /// } - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub unsafe fn deref(&self) -> &'g T { - let (raw, _) = decompose_tag::(self.data); - unsafe { T::deref(raw) } - } - - /// Dereferences the pointer. - /// - /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`. 
- /// - /// # Safety - /// - /// * There is no guarantee that there are no more threads attempting to read/write from/to the - /// actual object at the same time. - /// - /// The user must know that there are no concurrent accesses towards the object itself. - /// - /// * Other than the above, all safety concerns of `deref()` applies here. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(vec![1, 2, 3, 4]); - /// let guard = &epoch::pin(); - /// - /// let mut p = a.load(SeqCst, guard); - /// unsafe { - /// assert!(!p.is_null()); - /// let b = p.deref_mut(); - /// assert_eq!(b, &vec![1, 2, 3, 4]); - /// b.push(5); - /// assert_eq!(b, &vec![1, 2, 3, 4, 5]); - /// } - /// - /// let p = a.load(SeqCst, guard); - /// unsafe { - /// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]); - /// } - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub unsafe fn deref_mut(&mut self) -> &'g mut T { - let (raw, _) = decompose_tag::(self.data); - unsafe { T::deref_mut(raw) } - } - - /// Converts the pointer to a reference. - /// - /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`. - /// - /// # Safety - /// - /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory. - /// - /// Another concern is the possibility of data races due to lack of proper synchronization. - /// For example, consider the following scenario: - /// - /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)` - /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()` - /// - /// The problem is that relaxed orderings don't synchronize initialization of the object with - /// the read from the second thread. This is a data race. A possible solution would be to use - /// `Release` and `Acquire` orderings. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// let guard = &epoch::pin(); - /// let p = a.load(SeqCst, guard); - /// unsafe { - /// assert_eq!(p.as_ref(), Some(&1234)); - /// } - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub unsafe fn as_ref(&self) -> Option<&'g T> { - let (raw, _) = decompose_tag::(self.data); - if raw.is_null() { - None - } else { - Some(unsafe { T::deref(raw) }) - } - } - - /// Takes ownership of the pointee. - /// - /// # Panics - /// - /// Panics if this pointer is null, but only in debug mode. - /// - /// # Safety - /// - /// This method may be called only if the pointer is valid and nobody else is holding a - /// reference to the same object. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// unsafe { - /// let guard = &epoch::unprotected(); - /// let p = a.load(SeqCst, guard); - /// drop(p.into_owned()); - /// } - /// ``` - pub unsafe fn into_owned(self) -> Owned { - debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`"); - unsafe { Owned::from_ptr(self.data) } - } - - /// Takes ownership of the pointee if it is not null. - /// - /// # Safety - /// - /// This method may be called only if the pointer is valid and nobody else is holding a - /// reference to the same object, or if the pointer is null. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(1234); - /// unsafe { - /// let guard = &epoch::unprotected(); - /// let p = a.load(SeqCst, guard); - /// if let Some(x) = p.try_into_owned() { - /// drop(x); - /// } - /// } - /// ``` - pub unsafe fn try_into_owned(self) -> Option> { - if self.is_null() { - None - } else { - Some(unsafe { Owned::from_ptr(self.data) }) - } - } - - /// Returns the tag stored within the pointer. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::::from(Owned::new(0u64).with_tag(2)); - /// let guard = &epoch::pin(); - /// let p = a.load(SeqCst, guard); - /// assert_eq!(p.tag(), 2); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn tag(&self) -> usize { - let (_, tag) = decompose_tag::(self.data); - tag - } - - /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to be fit into the - /// unused bits of the pointer to `T`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(0u64); - /// let guard = &epoch::pin(); - /// let p1 = a.load(SeqCst, guard); - /// let p2 = p1.with_tag(2); - /// - /// assert_eq!(p1.tag(), 0); - /// assert_eq!(p2.tag(), 2); - /// assert_eq!(p1.as_raw(), p2.as_raw()); - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn with_tag(&self, tag: usize) -> Shared<'g, T> { - unsafe { Self::from_ptr(compose_tag::(self.data, tag)) } - } -} - -impl From<*const T> for Shared<'_, T> { - /// Returns a new pointer pointing to `raw`. - /// - /// # Panics - /// - /// Panics if `raw` is not properly aligned. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::Shared; - /// - /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _); - /// assert!(!p.is_null()); - /// # unsafe { drop(p.into_owned()); } // avoid leak - /// ``` - fn from(raw: *const T) -> Self { - let raw = raw as *mut (); - ensure_aligned::(raw); - unsafe { Self::from_ptr(raw) } - } -} - -impl<'g, T: ?Sized + Pointable> PartialEq> for Shared<'g, T> { - fn eq(&self, other: &Self) -> bool { - self.data == other.data - } -} - -impl Eq for Shared<'_, T> {} - -impl<'g, T: ?Sized + Pointable> PartialOrd> for Shared<'g, T> { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.data.cmp(&other.data)) - } -} - -impl Ord for Shared<'_, T> { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.data.cmp(&other.data) - } -} - -impl fmt::Debug for Shared<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let (raw, tag) = decompose_tag::(self.data); - - f.debug_struct("Shared") - .field("raw", &raw) - .field("tag", &tag) - .finish() - } -} - -impl fmt::Pointer for Shared<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f) - } -} - -impl Default for Shared<'_, T> { - fn default() -> Self { - Self::null() - } -} - -#[cfg(all(test, not(crossbeam_loom)))] -mod tests { - use super::{Owned, Shared}; - use std::mem::MaybeUninit; - - #[test] - fn valid_tag_i8() { - Shared::::null().with_tag(0); - } - - #[test] - fn valid_tag_i64() { - Shared::::null().with_tag(7); - } - - #[test] - fn const_atomic_null() { - use super::Atomic; - static _U: Atomic = Atomic::::null(); - } - - #[test] - fn array_init() { - let owned = Owned::<[MaybeUninit]>::init(10); - let arr: &[MaybeUninit] = &owned; - assert_eq!(arr.len(), 10); - } -} diff --git a/crossbeam-epoch/src/collector.rs b/crossbeam-epoch/src/collector.rs deleted file mode 100644 index 26136c5e3..000000000 --- a/crossbeam-epoch/src/collector.rs +++ 
/dev/null @@ -1,466 +0,0 @@ -/// Epoch-based garbage collector. -/// -/// # Examples -/// -/// ``` -/// use crossbeam_epoch::Collector; -/// -/// let collector = Collector::new(); -/// -/// let handle = collector.register(); -/// drop(collector); // `handle` still works after dropping `collector` -/// -/// handle.pin().flush(); -/// ``` -use core::fmt; - -use crate::guard::Guard; -use crate::internal::{Global, Local}; -use crate::primitive::sync::Arc; - -/// An epoch-based garbage collector. -pub struct Collector { - pub(crate) global: Arc, -} - -unsafe impl Send for Collector {} -unsafe impl Sync for Collector {} - -impl Default for Collector { - #[allow(clippy::arc_with_non_send_sync)] // https://github.com/rust-lang/rust-clippy/issues/11382 - fn default() -> Self { - Self { - global: Arc::new(Global::new()), - } - } -} - -impl Collector { - /// Creates a new collector. - pub fn new() -> Self { - Self::default() - } - - /// Registers a new handle for the collector. - pub fn register(&self) -> LocalHandle { - Local::register(self) - } -} - -impl Clone for Collector { - /// Creates another reference to the same garbage collector. - fn clone(&self) -> Self { - Self { - global: self.global.clone(), - } - } -} - -impl fmt::Debug for Collector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Collector { .. }") - } -} - -impl PartialEq for Collector { - /// Checks if both handles point to the same collector. - fn eq(&self, rhs: &Self) -> bool { - Arc::ptr_eq(&self.global, &rhs.global) - } -} -impl Eq for Collector {} - -/// A handle to a garbage collector. -pub struct LocalHandle { - pub(crate) local: *const Local, -} - -impl LocalHandle { - /// Pins the handle. - #[inline] - pub fn pin(&self) -> Guard { - unsafe { (*self.local).pin() } - } - - /// Returns `true` if the handle is pinned. - #[inline] - pub fn is_pinned(&self) -> bool { - unsafe { (*self.local).is_pinned() } - } - - /// Returns the `Collector` associated with this handle. 
- #[inline] - pub fn collector(&self) -> &Collector { - unsafe { (*self.local).collector() } - } -} - -impl Drop for LocalHandle { - #[inline] - fn drop(&mut self) { - unsafe { - Local::release_handle(&*self.local); - } - } -} - -impl fmt::Debug for LocalHandle { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("LocalHandle { .. }") - } -} - -#[cfg(all(test, not(crossbeam_loom)))] -mod tests { - use std::mem::ManuallyDrop; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::vec::Vec; - - use crossbeam_utils::thread; - - use crate::{Collector, Owned}; - - const NUM_THREADS: usize = 8; - - #[test] - fn pin_reentrant() { - let collector = Collector::new(); - let handle = collector.register(); - drop(collector); - - assert!(!handle.is_pinned()); - { - let _guard = &handle.pin(); - assert!(handle.is_pinned()); - { - let _guard = &handle.pin(); - assert!(handle.is_pinned()); - } - assert!(handle.is_pinned()); - } - assert!(!handle.is_pinned()); - } - - #[test] - fn flush_local_bag() { - let collector = Collector::new(); - let handle = collector.register(); - drop(collector); - - for _ in 0..100 { - let guard = &handle.pin(); - unsafe { - let a = Owned::new(7).into_shared(guard); - guard.defer_destroy(a); - - assert!(!(*guard.local).bag.with(|b| (*b).is_empty())); - - while !(*guard.local).bag.with(|b| (*b).is_empty()) { - guard.flush(); - } - } - } - } - - #[test] - fn garbage_buffering() { - let collector = Collector::new(); - let handle = collector.register(); - drop(collector); - - let guard = &handle.pin(); - unsafe { - for _ in 0..10 { - let a = Owned::new(7).into_shared(guard); - guard.defer_destroy(a); - } - assert!(!(*guard.local).bag.with(|b| (*b).is_empty())); - } - } - - #[test] - fn pin_holds_advance() { - #[cfg(miri)] - const N: usize = 500; - #[cfg(not(miri))] - const N: usize = 500_000; - - let collector = Collector::new(); - - thread::scope(|scope| { - for _ in 0..NUM_THREADS { - scope.spawn(|_| { - let handle = 
collector.register(); - for _ in 0..N { - let guard = &handle.pin(); - - let before = collector.global.epoch.load(Ordering::Relaxed); - collector.global.collect(guard); - let after = collector.global.epoch.load(Ordering::Relaxed); - - assert!(after.wrapping_sub(before) <= 2); - } - }); - } - }) - .unwrap(); - } - - #[cfg(not(crossbeam_sanitize))] // TODO: assertions failed due to `cfg(crossbeam_sanitize)` reduce `internal::MAX_OBJECTS` - #[test] - fn incremental() { - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - static DESTROYS: AtomicUsize = AtomicUsize::new(0); - - let collector = Collector::new(); - let handle = collector.register(); - - unsafe { - let guard = &handle.pin(); - for _ in 0..COUNT { - let a = Owned::new(7i32).into_shared(guard); - guard.defer_unchecked(move || { - drop(a.into_owned()); - DESTROYS.fetch_add(1, Ordering::Relaxed); - }); - } - guard.flush(); - } - - let mut last = 0; - - while last < COUNT { - let curr = DESTROYS.load(Ordering::Relaxed); - assert!(curr - last <= 1024); - last = curr; - - let guard = &handle.pin(); - collector.global.collect(guard); - } - assert!(DESTROYS.load(Ordering::Relaxed) == COUNT); - } - - #[test] - fn buffering() { - const COUNT: usize = 10; - #[cfg(miri)] - const N: usize = 500; - #[cfg(not(miri))] - const N: usize = 100_000; - static DESTROYS: AtomicUsize = AtomicUsize::new(0); - - let collector = Collector::new(); - let handle = collector.register(); - - unsafe { - let guard = &handle.pin(); - for _ in 0..COUNT { - let a = Owned::new(7i32).into_shared(guard); - guard.defer_unchecked(move || { - drop(a.into_owned()); - DESTROYS.fetch_add(1, Ordering::Relaxed); - }); - } - } - - for _ in 0..N { - collector.global.collect(&handle.pin()); - } - assert!(DESTROYS.load(Ordering::Relaxed) < COUNT); - - handle.pin().flush(); - - while DESTROYS.load(Ordering::Relaxed) < COUNT { - let guard = &handle.pin(); - collector.global.collect(guard); - } - 
assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); - } - - #[test] - fn count_drops() { - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - static DROPS: AtomicUsize = AtomicUsize::new(0); - - struct Elem(#[allow(dead_code)] i32); - - impl Drop for Elem { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::Relaxed); - } - } - - let collector = Collector::new(); - let handle = collector.register(); - - unsafe { - let guard = &handle.pin(); - - for _ in 0..COUNT { - let a = Owned::new(Elem(7i32)).into_shared(guard); - guard.defer_destroy(a); - } - guard.flush(); - } - - while DROPS.load(Ordering::Relaxed) < COUNT { - let guard = &handle.pin(); - collector.global.collect(guard); - } - assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); - } - - #[test] - fn count_destroy() { - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - static DESTROYS: AtomicUsize = AtomicUsize::new(0); - - let collector = Collector::new(); - let handle = collector.register(); - - unsafe { - let guard = &handle.pin(); - - for _ in 0..COUNT { - let a = Owned::new(7i32).into_shared(guard); - guard.defer_unchecked(move || { - drop(a.into_owned()); - DESTROYS.fetch_add(1, Ordering::Relaxed); - }); - } - guard.flush(); - } - - while DESTROYS.load(Ordering::Relaxed) < COUNT { - let guard = &handle.pin(); - collector.global.collect(guard); - } - assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); - } - - #[test] - fn drop_array() { - const COUNT: usize = 700; - static DROPS: AtomicUsize = AtomicUsize::new(0); - - struct Elem(#[allow(dead_code)] i32); - - impl Drop for Elem { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::Relaxed); - } - } - - let collector = Collector::new(); - let handle = collector.register(); - - let mut guard = handle.pin(); - - let mut v = Vec::with_capacity(COUNT); - for i in 0..COUNT { - v.push(Elem(i as i32)); - } - - { - let a = Owned::new(v).into_shared(&guard); - unsafe { - 
guard.defer_destroy(a); - } - guard.flush(); - } - - while DROPS.load(Ordering::Relaxed) < COUNT { - guard.repin(); - collector.global.collect(&guard); - } - assert_eq!(DROPS.load(Ordering::Relaxed), COUNT); - } - - #[test] - fn destroy_array() { - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - static DESTROYS: AtomicUsize = AtomicUsize::new(0); - - let collector = Collector::new(); - let handle = collector.register(); - - unsafe { - let guard = &handle.pin(); - - let mut v = Vec::with_capacity(COUNT); - for i in 0..COUNT { - v.push(i as i32); - } - - let len = v.len(); - let cap = v.capacity(); - let ptr = ManuallyDrop::new(v).as_mut_ptr(); - guard.defer_unchecked(move || { - drop(Vec::from_raw_parts(ptr, len, cap)); - DESTROYS.fetch_add(len, Ordering::Relaxed); - }); - guard.flush(); - } - - while DESTROYS.load(Ordering::Relaxed) < COUNT { - let guard = &handle.pin(); - collector.global.collect(guard); - } - assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT); - } - - #[test] - fn stress() { - const THREADS: usize = 8; - #[cfg(miri)] - const COUNT: usize = 500; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - static DROPS: AtomicUsize = AtomicUsize::new(0); - - struct Elem(#[allow(dead_code)] i32); - - impl Drop for Elem { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::Relaxed); - } - } - - let collector = Collector::new(); - - thread::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - let handle = collector.register(); - for _ in 0..COUNT { - let guard = &handle.pin(); - unsafe { - let a = Owned::new(Elem(7i32)).into_shared(guard); - guard.defer_destroy(a); - } - } - }); - } - }) - .unwrap(); - - let handle = collector.register(); - while DROPS.load(Ordering::Relaxed) < COUNT * THREADS { - let guard = &handle.pin(); - collector.global.collect(guard); - } - assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS); - } -} diff --git a/crossbeam-epoch/src/default.rs 
b/crossbeam-epoch/src/default.rs deleted file mode 100644 index 14f0a9a6e..000000000 --- a/crossbeam-epoch/src/default.rs +++ /dev/null @@ -1,93 +0,0 @@ -//! The default garbage collector. -//! -//! For each thread, a participant is lazily initialized on its first use, when the current thread -//! is registered in the default collector. If initialized, the thread's participant will get -//! destructed on thread exit, which in turn unregisters the thread. - -use crate::collector::{Collector, LocalHandle}; -use crate::guard::Guard; -use crate::primitive::thread_local; -#[cfg(not(crossbeam_loom))] -use crate::sync::once_lock::OnceLock; - -fn collector() -> &'static Collector { - #[cfg(not(crossbeam_loom))] - { - /// The global data for the default garbage collector. - static COLLECTOR: OnceLock = OnceLock::new(); - COLLECTOR.get_or_init(Collector::new) - } - // FIXME: loom does not currently provide the equivalent of Lazy: - // https://github.com/tokio-rs/loom/issues/263 - #[cfg(crossbeam_loom)] - { - loom::lazy_static! { - /// The global data for the default garbage collector. - static ref COLLECTOR: Collector = Collector::new(); - } - &COLLECTOR - } -} - -thread_local! { - /// The per-thread participant for the default garbage collector. - static HANDLE: LocalHandle = collector().register(); -} - -/// Pins the current thread. -#[inline] -pub fn pin() -> Guard { - with_handle(|handle| handle.pin()) -} - -/// Returns `true` if the current thread is pinned. -#[inline] -pub fn is_pinned() -> bool { - with_handle(|handle| handle.is_pinned()) -} - -/// Returns the default global collector. 
-pub fn default_collector() -> &'static Collector { - collector() -} - -#[inline] -fn with_handle(mut f: F) -> R -where - F: FnMut(&LocalHandle) -> R, -{ - HANDLE - .try_with(|h| f(h)) - .unwrap_or_else(|_| f(&collector().register())) -} - -#[cfg(all(test, not(crossbeam_loom)))] -mod tests { - use crossbeam_utils::thread; - - #[test] - fn pin_while_exiting() { - struct Foo; - - impl Drop for Foo { - fn drop(&mut self) { - // Pin after `HANDLE` has been dropped. This must not panic. - super::pin(); - } - } - - std::thread_local! { - static FOO: Foo = const { Foo }; - } - - thread::scope(|scope| { - scope.spawn(|_| { - // Initialize `FOO` and then `HANDLE`. - FOO.with(|_| ()); - super::pin(); - // At thread exit, `HANDLE` gets dropped first and `FOO` second. - }); - }) - .unwrap(); - } -} diff --git a/crossbeam-epoch/src/deferred.rs b/crossbeam-epoch/src/deferred.rs deleted file mode 100644 index 6b9ecc656..000000000 --- a/crossbeam-epoch/src/deferred.rs +++ /dev/null @@ -1,149 +0,0 @@ -use alloc::boxed::Box; -use core::fmt; -use core::marker::PhantomData; -use core::mem::{self, MaybeUninit}; -use core::ptr; - -/// Number of words a piece of `Data` can hold. -/// -/// Three words should be enough for the majority of cases. For example, you can fit inside it the -/// function pointer together with a fat pointer representing an object that needs to be destroyed. -const DATA_WORDS: usize = 3; - -/// Some space to keep a `FnOnce()` object on the stack. -type Data = [usize; DATA_WORDS]; - -/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap. -/// -/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure. -pub(crate) struct Deferred { - call: unsafe fn(*mut u8), - data: MaybeUninit, - _marker: PhantomData<*mut ()>, // !Send + !Sync -} - -impl fmt::Debug for Deferred { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { - f.pad("Deferred { .. 
}") - } -} - -impl Deferred { - pub(crate) const NO_OP: Self = { - fn no_op_call(_raw: *mut u8) {} - Self { - call: no_op_call, - data: MaybeUninit::uninit(), - _marker: PhantomData, - } - }; - - /// Constructs a new `Deferred` from a `FnOnce()`. - pub(crate) fn new(f: F) -> Self { - let size = mem::size_of::(); - let align = mem::align_of::(); - - unsafe { - if size <= mem::size_of::() && align <= mem::align_of::() { - let mut data = MaybeUninit::::uninit(); - ptr::write(data.as_mut_ptr().cast::(), f); - - unsafe fn call(raw: *mut u8) { - let f: F = unsafe { ptr::read(raw.cast::()) }; - f(); - } - - Self { - call: call::, - data, - _marker: PhantomData, - } - } else { - let b: Box = Box::new(f); - let mut data = MaybeUninit::::uninit(); - ptr::write(data.as_mut_ptr().cast::>(), b); - - unsafe fn call(raw: *mut u8) { - // It's safe to cast `raw` from `*mut u8` to `*mut Box`, because `raw` is - // originally derived from `*mut Box`. - let b: Box = unsafe { ptr::read(raw.cast::>()) }; - (*b)(); - } - - Self { - call: call::, - data, - _marker: PhantomData, - } - } - } - } - - /// Calls the function. 
- #[inline] - pub(crate) fn call(mut self) { - let call = self.call; - unsafe { call(self.data.as_mut_ptr().cast::()) }; - } -} - -#[cfg(all(test, not(crossbeam_loom)))] -mod tests { - use super::Deferred; - use std::boxed::Box; - use std::cell::Cell; - use std::convert::identity; - use std::string::ToString; - use std::vec; - - #[test] - fn on_stack() { - let fired = &Cell::new(false); - let a = [0usize; 1]; - - let d = Deferred::new(move || { - let _ = identity(a); - fired.set(true); - }); - - assert!(!fired.get()); - d.call(); - assert!(fired.get()); - } - - #[test] - fn on_heap() { - let fired = &Cell::new(false); - let a = [0usize; 10]; - - let d = Deferred::new(move || { - let _ = identity(a); - fired.set(true); - }); - - assert!(!fired.get()); - d.call(); - assert!(fired.get()); - } - - #[test] - fn string() { - let a = "hello".to_string(); - let d = Deferred::new(move || assert_eq!(a, "hello")); - d.call(); - } - - #[test] - fn boxed_slice_i32() { - let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice(); - let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7])); - d.call(); - } - - #[test] - fn long_slice_usize() { - let a: [usize; 5] = [2, 3, 5, 7, 11]; - let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11])); - d.call(); - } -} diff --git a/crossbeam-epoch/src/epoch.rs b/crossbeam-epoch/src/epoch.rs deleted file mode 100644 index 0c1c99dca..000000000 --- a/crossbeam-epoch/src/epoch.rs +++ /dev/null @@ -1,132 +0,0 @@ -//! The global epoch -//! -//! The last bit in this number is unused and is always zero. Every so often the global epoch is -//! incremented, i.e. we say it "advances". A pinned participant may advance the global epoch only -//! if all currently pinned participants have been pinned in the current epoch. -//! -//! If an object became garbage in some epoch, then we can be sure that after two advancements no -//! participant will hold a reference to it. That is the crux of safe memory reclamation. 
- -use crate::primitive::sync::atomic::{AtomicUsize, Ordering}; - -/// An epoch that can be marked as pinned or unpinned. -/// -/// Internally, the epoch is represented as an integer that wraps around at some unspecified point -/// and a flag that represents whether it is pinned or unpinned. -#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)] -pub(crate) struct Epoch { - /// The least significant bit is set if pinned. The rest of the bits hold the epoch. - data: usize, -} - -impl Epoch { - /// Returns the starting epoch in unpinned state. - #[inline] - pub(crate) fn starting() -> Self { - Self::default() - } - - /// Returns the number of epochs `self` is ahead of `rhs`. - /// - /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX - /// / 2)`, so the returned distance will be in the same interval. - pub(crate) fn wrapping_sub(self, rhs: Self) -> isize { - // The result is the same with `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`, - // because the possible difference of LSB in `(self.data & !1).wrapping_sub(rhs.data & !1)` - // will be ignored in the shift operation. - self.data.wrapping_sub(rhs.data & !1) as isize >> 1 - } - - /// Returns `true` if the epoch is marked as pinned. - #[inline] - pub(crate) fn is_pinned(self) -> bool { - (self.data & 1) == 1 - } - - /// Returns the same epoch, but marked as pinned. - #[inline] - pub(crate) fn pinned(self) -> Self { - Self { - data: self.data | 1, - } - } - - /// Returns the same epoch, but marked as unpinned. - #[inline] - pub(crate) fn unpinned(self) -> Self { - Self { - data: self.data & !1, - } - } - - /// Returns the successor epoch. - /// - /// The returned epoch will be marked as pinned only if the previous one was as well. - #[inline] - pub(crate) fn successor(self) -> Self { - Self { - data: self.data.wrapping_add(2), - } - } -} - -/// An atomic value that holds an `Epoch`. 
-#[derive(Default, Debug)] -pub(crate) struct AtomicEpoch { - /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented - /// using an `AtomicUsize`. - data: AtomicUsize, -} - -impl AtomicEpoch { - /// Creates a new atomic epoch. - #[inline] - pub(crate) fn new(epoch: Epoch) -> Self { - let data = AtomicUsize::new(epoch.data); - Self { data } - } - - /// Loads a value from the atomic epoch. - #[inline] - pub(crate) fn load(&self, ord: Ordering) -> Epoch { - Epoch { - data: self.data.load(ord), - } - } - - /// Stores a value into the atomic epoch. - #[inline] - pub(crate) fn store(&self, epoch: Epoch, ord: Ordering) { - self.data.store(epoch.data, ord); - } - - /// Stores a value into the atomic epoch if the current value is the same as `current`. - /// - /// The return value is a result indicating whether the new value was written and containing - /// the previous value. On success this value is guaranteed to be equal to `current`. - /// - /// This method takes two `Ordering` arguments to describe the memory - /// ordering of this operation. `success` describes the required ordering for the - /// read-modify-write operation that takes place if the comparison with `current` succeeds. - /// `failure` describes the required ordering for the load operation that takes place when - /// the comparison fails. Using `Acquire` as success ordering makes the store part - /// of this operation `Relaxed`, and using `Release` makes the successful load - /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed` - /// and must be equivalent to or weaker than the success ordering. 
- #[inline] - pub(crate) fn compare_exchange( - &self, - current: Epoch, - new: Epoch, - success: Ordering, - failure: Ordering, - ) -> Result { - match self - .data - .compare_exchange(current.data, new.data, success, failure) - { - Ok(data) => Ok(Epoch { data }), - Err(data) => Err(Epoch { data }), - } - } -} diff --git a/crossbeam-epoch/src/guard.rs b/crossbeam-epoch/src/guard.rs deleted file mode 100644 index 57224e977..000000000 --- a/crossbeam-epoch/src/guard.rs +++ /dev/null @@ -1,525 +0,0 @@ -use core::fmt; -use core::mem; - -use crate::atomic::Shared; -use crate::collector::Collector; -use crate::deferred::Deferred; -use crate::internal::Local; - -/// A guard that keeps the current thread pinned. -/// -/// # Pinning -/// -/// The current thread is pinned by calling [`pin`], which returns a new guard: -/// -/// ``` -/// use crossbeam_epoch as epoch; -/// -/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference. -/// // This is not really necessary, but makes passing references to the guard a bit easier. -/// let guard = &epoch::pin(); -/// ``` -/// -/// When a guard gets dropped, the current thread is automatically unpinned. -/// -/// # Pointers on the stack -/// -/// Having a guard allows us to create pointers on the stack to heap-allocated objects. -/// For example: -/// -/// ``` -/// use crossbeam_epoch::{self as epoch, Atomic}; -/// use std::sync::atomic::Ordering::SeqCst; -/// -/// // Create a heap-allocated number. -/// let a = Atomic::new(777); -/// -/// // Pin the current thread. -/// let guard = &epoch::pin(); -/// -/// // Load the heap-allocated object and create pointer `p` on the stack. 
-/// let p = a.load(SeqCst, guard); -/// -/// // Dereference the pointer and print the value: -/// if let Some(num) = unsafe { p.as_ref() } { -/// println!("The number is {}.", num); -/// } -/// # unsafe { drop(a.into_owned()); } // avoid leak -/// ``` -/// -/// # Multiple guards -/// -/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the -/// thread will actually be pinned only when the first guard is created and unpinned when the last -/// one is dropped: -/// -/// ``` -/// use crossbeam_epoch as epoch; -/// -/// let guard1 = epoch::pin(); -/// let guard2 = epoch::pin(); -/// assert!(epoch::is_pinned()); -/// drop(guard1); -/// assert!(epoch::is_pinned()); -/// drop(guard2); -/// assert!(!epoch::is_pinned()); -/// ``` -/// -/// [`pin`]: super::pin -pub struct Guard { - pub(crate) local: *const Local, -} - -impl Guard { - /// Stores a function so that it can be executed at some point after all currently pinned - /// threads get unpinned. - /// - /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache - /// becomes full, some functions are moved into the global cache. At the same time, some - /// functions from both local and global caches may get executed in order to incrementally - /// clean up the caches as they fill up. - /// - /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it - /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might - /// never run, but the epoch-based garbage collection will make an effort to execute it - /// reasonably soon. - /// - /// If this method is called from an [`unprotected`] guard, the function will simply be - /// executed immediately. 
- pub fn defer(&self, f: F) - where - F: FnOnce() -> R, - F: Send + 'static, - { - unsafe { - self.defer_unchecked(f); - } - } - - /// Stores a function so that it can be executed at some point after all currently pinned - /// threads get unpinned. - /// - /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache - /// becomes full, some functions are moved into the global cache. At the same time, some - /// functions from both local and global caches may get executed in order to incrementally - /// clean up the caches as they fill up. - /// - /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it - /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might - /// never run, but the epoch-based garbage collection will make an effort to execute it - /// reasonably soon. - /// - /// If this method is called from an [`unprotected`] guard, the function will simply be - /// executed immediately. - /// - /// # Safety - /// - /// The given function must not hold reference onto the stack. It is highly recommended that - /// the passed function is **always** marked with `move` in order to prevent accidental - /// borrows. - /// - /// ``` - /// use crossbeam_epoch as epoch; - /// - /// let guard = &epoch::pin(); - /// let message = "Hello!"; - /// unsafe { - /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. - /// guard.defer_unchecked(move || { - /// println!("{}", message); - /// }); - /// } - /// ``` - /// - /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by - /// the closure must be `Send`. - /// - /// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove - /// `F: Send` for typical use cases. 
For example, consider the following code snippet, which - /// exemplifies the typical use case of deferring the deallocation of a shared reference: - /// - /// ```ignore - /// let shared = Owned::new(7i32).into_shared(guard); - /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`! - /// ``` - /// - /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function, - /// because it's called only after the grace period and `shared` is no longer shared with other - /// threads. But we don't expect type systems to prove this. - /// - /// # Examples - /// - /// When a heap-allocated object in a data structure becomes unreachable, it has to be - /// deallocated. However, the current thread and other threads may be still holding references - /// on the stack to that same object. Therefore it cannot be deallocated before those references - /// get dropped. This method can defer deallocation until all those threads get unpinned and - /// consequently drop all their references on the stack. - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new("foo"); - /// - /// // Now suppose that `a` is shared among multiple threads and concurrently - /// // accessed and modified... - /// - /// // Pin the current thread. - /// let guard = &epoch::pin(); - /// - /// // Steal the object currently stored in `a` and swap it with another one. - /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); - /// - /// if !p.is_null() { - /// // The object `p` is pointing to is now unreachable. - /// // Defer its deallocation until all currently pinned threads get unpinned. - /// unsafe { - /// // ALWAYS use `move` when sending a closure into `defer_unchecked`. 
- /// guard.defer_unchecked(move || { - /// println!("{} is now being deallocated.", p.deref()); - /// // Now we have unique access to the object pointed to by `p` and can turn it - /// // into an `Owned`. Dropping the `Owned` will deallocate the object. - /// drop(p.into_owned()); - /// }); - /// } - /// } - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub unsafe fn defer_unchecked(&self, f: F) - where - F: FnOnce() -> R, - { - unsafe { - if let Some(local) = self.local.as_ref() { - local.defer(Deferred::new(move || drop(f())), self); - } else { - drop(f()); - } - } - } - - /// Stores a destructor for an object so that it can be deallocated and dropped at some point - /// after all currently pinned threads get unpinned. - /// - /// This method first stores the destructor into the thread-local (or handle-local) cache. If - /// this cache becomes full, some destructors are moved into the global cache. At the same - /// time, some destructors from both local and global caches may get executed in order to - /// incrementally clean up the caches as they fill up. - /// - /// There is no guarantee when exactly the destructor will be executed. The only guarantee is - /// that it won't be executed until all currently pinned threads get unpinned. In theory, the - /// destructor might never run, but the epoch-based garbage collection will make an effort to - /// execute it reasonably soon. - /// - /// If this method is called from an [`unprotected`] guard, the destructor will simply be - /// executed immediately. - /// - /// # Safety - /// - /// The object must not be reachable by other threads anymore, otherwise it might be still in - /// use when the destructor runs. - /// - /// Apart from that, keep in mind that another thread may execute the destructor, so the object - /// must be sendable to other threads. - /// - /// We intentionally didn't require `T: Send`, because Rust's type systems usually cannot prove - /// `T: Send` for typical use cases. 
For example, consider the following code snippet, which - /// exemplifies the typical use case of deferring the deallocation of a shared reference: - /// - /// ```ignore - /// let shared = Owned::new(7i32).into_shared(guard); - /// guard.defer_destroy(shared); // `Shared` is not `Send`! - /// ``` - /// - /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because - /// it's called only after the grace period and `shared` is no longer shared with other - /// threads. But we don't expect type systems to prove this. - /// - /// # Examples - /// - /// When a heap-allocated object in a data structure becomes unreachable, it has to be - /// deallocated. However, the current thread and other threads may be still holding references - /// on the stack to that same object. Therefore it cannot be deallocated before those references - /// get dropped. This method can defer deallocation until all those threads get unpinned and - /// consequently drop all their references on the stack. - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic, Owned}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new("foo"); - /// - /// // Now suppose that `a` is shared among multiple threads and concurrently - /// // accessed and modified... - /// - /// // Pin the current thread. - /// let guard = &epoch::pin(); - /// - /// // Steal the object currently stored in `a` and swap it with another one. - /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard); - /// - /// if !p.is_null() { - /// // The object `p` is pointing to is now unreachable. - /// // Defer its deallocation until all currently pinned threads get unpinned. 
- /// unsafe { - /// guard.defer_destroy(p); - /// } - /// } - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub unsafe fn defer_destroy(&self, ptr: Shared<'_, T>) { - unsafe { self.defer_unchecked(move || ptr.into_owned()) } - } - - /// Clears up the thread-local cache of deferred functions by executing them or moving into the - /// global cache. - /// - /// Call this method after deferring execution of a function if you want to get it executed as - /// soon as possible. Flushing will make sure it is residing in the global cache, so that - /// any thread has a chance of taking the function and executing it. - /// - /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens). - /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch as epoch; - /// - /// let guard = &epoch::pin(); - /// guard.defer(move || { - /// println!("This better be printed as soon as possible!"); - /// }); - /// guard.flush(); - /// ``` - pub fn flush(&self) { - if let Some(local) = unsafe { self.local.as_ref() } { - local.flush(self); - } - } - - /// Unpins and then immediately re-pins the thread. - /// - /// This method is useful when you don't want delay the advancement of the global epoch by - /// holding an old epoch. For safety, you should not maintain any guard-based reference across - /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this - /// is the only active guard for the current thread. - /// - /// If this method is called from an [`unprotected`] guard, then the call will be just no-op. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// let a = Atomic::new(777); - /// let mut guard = epoch::pin(); - /// { - /// let p = a.load(SeqCst, &guard); - /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); - /// } - /// guard.repin(); - /// { - /// let p = a.load(SeqCst, &guard); - /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); - /// } - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn repin(&mut self) { - if let Some(local) = unsafe { self.local.as_ref() } { - local.repin(); - } - } - - /// Temporarily unpins the thread, executes the given function and then re-pins the thread. - /// - /// This method is useful when you need to perform a long-running operation (e.g. sleeping) - /// and don't need to maintain any guard-based reference across the call (the latter is enforced - /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the - /// current thread. - /// - /// If this method is called from an [`unprotected`] guard, then the passed function is called - /// directly without unpinning the thread. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch::{self as epoch, Atomic}; - /// use std::sync::atomic::Ordering::SeqCst; - /// use std::thread; - /// use std::time::Duration; - /// - /// let a = Atomic::new(777); - /// let mut guard = epoch::pin(); - /// { - /// let p = a.load(SeqCst, &guard); - /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); - /// } - /// guard.repin_after(|| thread::sleep(Duration::from_millis(50))); - /// { - /// let p = a.load(SeqCst, &guard); - /// assert_eq!(unsafe { p.as_ref() }, Some(&777)); - /// } - /// # unsafe { drop(a.into_owned()); } // avoid leak - /// ``` - pub fn repin_after(&mut self, f: F) -> R - where - F: FnOnce() -> R, - { - // Ensure the Guard is re-pinned even if the function panics - struct ScopeGuard(*const Local); - impl Drop for ScopeGuard { - fn drop(&mut self) { - if let Some(local) = unsafe { self.0.as_ref() } { - mem::forget(local.pin()); - local.release_handle(); - } - } - } - - if let Some(local) = unsafe { self.local.as_ref() } { - // We need to acquire a handle here to ensure the Local doesn't - // disappear from under us. - local.acquire_handle(); - local.unpin(); - } - - let _guard = ScopeGuard(self.local); - - f() - } - - /// Returns the `Collector` associated with this guard. - /// - /// This method is useful when you need to ensure that all guards used with - /// a data structure come from the same collector. - /// - /// If this method is called from an [`unprotected`] guard, then `None` is returned. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_epoch as epoch; - /// - /// let guard1 = epoch::pin(); - /// let guard2 = epoch::pin(); - /// assert!(guard1.collector() == guard2.collector()); - /// ``` - pub fn collector(&self) -> Option<&Collector> { - unsafe { self.local.as_ref().map(|local| local.collector()) } - } -} - -impl Drop for Guard { - #[inline] - fn drop(&mut self) { - if let Some(local) = unsafe { self.local.as_ref() } { - local.unpin(); - } - } -} - -impl fmt::Debug for Guard { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Guard { .. }") - } -} - -/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s. -/// -/// This guard should be used in special occasions only. Note that it doesn't actually keep any -/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely. -/// -/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just -/// execute the function immediately. -/// -/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`. -/// -/// # Safety -/// -/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the -/// [`Atomic`] is not being concurrently modified by other threads. -/// -/// # Examples -/// -/// ``` -/// use crossbeam_epoch::{self as epoch, Atomic}; -/// use std::sync::atomic::Ordering::Relaxed; -/// -/// let a = Atomic::new(7); -/// -/// unsafe { -/// // Load `a` without pinning the current thread. -/// a.load(Relaxed, epoch::unprotected()); -/// -/// // It's possible to create more dummy guards. -/// let dummy = epoch::unprotected(); -/// -/// dummy.defer(move || { -/// println!("This gets executed immediately."); -/// }); -/// -/// // Dropping `dummy` doesn't affect the current thread - it's just a noop. 
-/// } -/// # unsafe { drop(a.into_owned()); } // avoid leak -/// ``` -/// -/// The most common use of this function is when constructing or destructing a data structure. -/// -/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that -/// point no other thread could concurrently modify the [`Atomic`]s we are accessing. -/// -/// If we were to actually pin the current thread during destruction, that would just unnecessarily -/// delay garbage collection and incur some performance cost, so in cases like these `unprotected` -/// is very helpful. -/// -/// ``` -/// use crossbeam_epoch::{self as epoch, Atomic}; -/// use std::mem::ManuallyDrop; -/// use std::sync::atomic::Ordering::Relaxed; -/// -/// struct Stack { -/// head: Atomic>, -/// } -/// -/// struct Node { -/// data: ManuallyDrop, -/// next: Atomic>, -/// } -/// -/// impl Drop for Stack { -/// fn drop(&mut self) { -/// unsafe { -/// // Unprotected load. -/// let mut node = self.head.load(Relaxed, epoch::unprotected()); -/// -/// while let Some(n) = node.as_ref() { -/// // Unprotected load. -/// let next = n.next.load(Relaxed, epoch::unprotected()); -/// -/// // Take ownership of the node, then drop its data and deallocate it. -/// let mut o = node.into_owned(); -/// ManuallyDrop::drop(&mut o.data); -/// drop(o); -/// -/// node = next; -/// } -/// } -/// } -/// } -/// ``` -/// -/// [`Atomic`]: super::Atomic -/// [`defer`]: Guard::defer -#[inline] -pub unsafe fn unprotected() -> &'static Guard { - // An unprotected guard is just a `Guard` with its field `local` set to null. 
- // We make a newtype over `Guard` because `Guard` isn't `Sync`, so can't be directly stored in - // a `static` - struct GuardWrapper(Guard); - unsafe impl Sync for GuardWrapper {} - static UNPROTECTED: GuardWrapper = GuardWrapper(Guard { - local: core::ptr::null(), - }); - &UNPROTECTED.0 -} diff --git a/crossbeam-epoch/src/internal.rs b/crossbeam-epoch/src/internal.rs deleted file mode 100644 index 0a57606e9..000000000 --- a/crossbeam-epoch/src/internal.rs +++ /dev/null @@ -1,602 +0,0 @@ -//! The global data and participant for garbage collection. -//! -//! # Registration -//! -//! In order to track all participants in one place, we need some form of participant -//! registration. When a participant is created, it is registered to a global lock-free -//! singly-linked list of registries; and when a participant is leaving, it is unregistered from the -//! list. -//! -//! # Pinning -//! -//! Every participant contains an integer that tells whether the participant is pinned and if so, -//! what was the global epoch at the time it was pinned. Participants also hold a pin counter that -//! aids in periodic global epoch advancement. -//! -//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned. -//! Guards are necessary for performing atomic operations, and for freeing/dropping locations. -//! -//! # Thread-local bag -//! -//! Objects that get unlinked from concurrent data structures must be stashed away until the global -//! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects -//! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current -//! global epoch and pushed into the global queue of bags. We store objects in thread-local storages -//! for amortizing the synchronization cost of pushing the garbages to a global queue. -//! -//! # Global queue -//! -//! 
Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and -//! destroyed along the way. This design reduces contention on data structures. The global queue -//! cannot be explicitly accessed: the only way to interact with it is by calling functions -//! `defer()` that adds an object to the thread-local bag, or `collect()` that manually triggers -//! garbage collection. -//! -//! Ideally each instance of concurrent data structure may have its own queue that gets fully -//! destroyed as soon as the data structure gets dropped. - -use crate::primitive::cell::UnsafeCell; -use crate::primitive::sync::atomic::{self, Ordering}; -use core::cell::Cell; -use core::mem::{self, ManuallyDrop}; -use core::num::Wrapping; -use core::{fmt, ptr}; - -use crossbeam_utils::CachePadded; - -use crate::atomic::{Owned, Shared}; -use crate::collector::{Collector, LocalHandle}; -use crate::deferred::Deferred; -use crate::epoch::{AtomicEpoch, Epoch}; -use crate::guard::{unprotected, Guard}; -use crate::sync::list::{Entry, IsElement, IterError, List}; -use crate::sync::queue::Queue; - -/// Maximum number of objects a bag can contain. -#[cfg(not(any(crossbeam_sanitize, miri)))] -const MAX_OBJECTS: usize = 64; -// Makes it more likely to trigger any potential data races. -#[cfg(any(crossbeam_sanitize, miri))] -const MAX_OBJECTS: usize = 4; - -/// A bag of deferred functions. -pub(crate) struct Bag { - /// Stashed objects. - deferreds: [Deferred; MAX_OBJECTS], - len: usize, -} - -/// `Bag::try_push()` requires that it is safe for another thread to execute the given functions. -unsafe impl Send for Bag {} - -impl Bag { - /// Returns a new, empty bag. - pub(crate) fn new() -> Self { - Self::default() - } - - /// Returns `true` if the bag is empty. - pub(crate) fn is_empty(&self) -> bool { - self.len == 0 - } - - /// Attempts to insert a deferred function into the bag. 
- /// - /// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is - /// full. - /// - /// # Safety - /// - /// It should be safe for another thread to execute the given function. - pub(crate) unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> { - if self.len < MAX_OBJECTS { - self.deferreds[self.len] = deferred; - self.len += 1; - Ok(()) - } else { - Err(deferred) - } - } - - /// Seals the bag with the given epoch. - fn seal(self, epoch: Epoch) -> SealedBag { - SealedBag { epoch, _bag: self } - } -} - -impl Default for Bag { - fn default() -> Self { - Self { - len: 0, - deferreds: [Deferred::NO_OP; MAX_OBJECTS], - } - } -} - -impl Drop for Bag { - fn drop(&mut self) { - // Call all deferred functions. - for deferred in &mut self.deferreds[..self.len] { - let no_op = Deferred::NO_OP; - let owned_deferred = mem::replace(deferred, no_op); - owned_deferred.call(); - } - } -} - -// can't #[derive(Debug)] because Debug is not implemented for arrays 64 items long -impl fmt::Debug for Bag { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Bag") - .field("deferreds", &&self.deferreds[..self.len]) - .finish() - } -} - -/// A pair of an epoch and a bag. -#[derive(Default, Debug)] -struct SealedBag { - epoch: Epoch, - _bag: Bag, -} - -/// It is safe to share `SealedBag` because `is_expired` only inspects the epoch. -unsafe impl Sync for SealedBag {} - -impl SealedBag { - /// Checks if it is safe to drop the bag w.r.t. the given global epoch. - fn is_expired(&self, global_epoch: Epoch) -> bool { - // A pinned participant can witness at most one epoch advancement. Therefore, any bag that - // is within one epoch of the current one cannot be destroyed yet. - global_epoch.wrapping_sub(self.epoch) >= 2 - } -} - -/// The global data for a garbage collector. -pub(crate) struct Global { - /// The intrusive linked list of `Local`s. 
- locals: List, - - /// The global queue of bags of deferred functions. - queue: Queue, - - /// The global epoch. - pub(crate) epoch: CachePadded, -} - -impl Global { - /// Number of bags to destroy. - const COLLECT_STEPS: usize = 8; - - /// Creates a new global data for garbage collection. - #[inline] - pub(crate) fn new() -> Self { - Self { - locals: List::new(), - queue: Queue::new(), - epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())), - } - } - - /// Pushes the bag into the global queue and replaces the bag with a new empty bag. - pub(crate) fn push_bag(&self, bag: &mut Bag, guard: &Guard) { - let bag = mem::replace(bag, Bag::new()); - - atomic::fence(Ordering::SeqCst); - - let epoch = self.epoch.load(Ordering::Relaxed); - self.queue.push(bag.seal(epoch), guard); - } - - /// Collects several bags from the global queue and executes deferred functions in them. - /// - /// Note: This may itself produce garbage and in turn allocate new bags. - /// - /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold - /// path. In other words, we want the compiler to optimize branching for the case when - /// `collect()` is not called. - #[cold] - pub(crate) fn collect(&self, guard: &Guard) { - let global_epoch = self.try_advance(guard); - - let steps = if cfg!(crossbeam_sanitize) { - usize::MAX - } else { - Self::COLLECT_STEPS - }; - - for _ in 0..steps { - match self.queue.try_pop_if( - |sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch), - guard, - ) { - None => break, - Some(sealed_bag) => drop(sealed_bag), - } - } - } - - /// Attempts to advance the global epoch. - /// - /// The global epoch can advance only if all currently pinned participants have been pinned in - /// the current epoch. - /// - /// Returns the current global epoch. - /// - /// `try_advance()` is annotated `#[cold]` because it is rarely called. 
- #[cold] - pub(crate) fn try_advance(&self, guard: &Guard) -> Epoch { - let global_epoch = self.epoch.load(Ordering::Relaxed); - atomic::fence(Ordering::SeqCst); - - // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly - // easy to implement in a lock-free manner. However, traversal can be slow due to cache - // misses and data dependencies. We should experiment with other data structures as well. - for local in self.locals.iter(guard) { - match local { - Err(IterError::Stalled) => { - // A concurrent thread stalled this iteration. That thread might also try to - // advance the epoch, in which case we leave the job to it. Otherwise, the - // epoch will not be advanced. - return global_epoch; - } - Ok(local) => { - let local_epoch = local.epoch.load(Ordering::Relaxed); - - // If the participant was pinned in a different epoch, we cannot advance the - // global epoch just yet. - if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch { - return global_epoch; - } - } - } - } - atomic::fence(Ordering::Acquire); - - // All pinned participants were pinned in the current global epoch. - // Now let's advance the global epoch... - // - // Note that if another thread already advanced it before us, this store will simply - // overwrite the global epoch with the same value. This is true because `try_advance` was - // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be - // advanced two steps ahead of it. - let new_epoch = global_epoch.successor(); - self.epoch.store(new_epoch, Ordering::Release); - new_epoch - } -} - -/// Participant for garbage collection. -#[repr(C)] // Note: `entry` must be the first field -pub(crate) struct Local { - /// A node in the intrusive linked list of `Local`s. - entry: Entry, - - /// A reference to the global data. - /// - /// When all guards and handles get dropped, this reference is destroyed. - collector: UnsafeCell>, - - /// The local bag of deferred functions. 
- pub(crate) bag: UnsafeCell, - - /// The number of guards keeping this participant pinned. - guard_count: Cell, - - /// The number of active handles. - handle_count: Cell, - - /// Total number of pinnings performed. - /// - /// This is just an auxiliary counter that sometimes kicks off collection. - pin_count: Cell>, - - /// The local epoch. - epoch: CachePadded, -} - -// Make sure `Local` is less than or equal to 2048 bytes. -// https://github.com/crossbeam-rs/crossbeam/issues/551 -#[cfg(not(any(crossbeam_sanitize, miri)))] // `crossbeam_sanitize` and `miri` reduce the size of `Local` -#[test] -fn local_size() { - // TODO: https://github.com/crossbeam-rs/crossbeam/issues/869 - // assert!( - // core::mem::size_of::() <= 2048, - // "An allocation of `Local` should be <= 2048 bytes." - // ); -} - -impl Local { - /// Number of pinnings after which a participant will execute some deferred functions from the - /// global queue. - const PINNINGS_BETWEEN_COLLECT: usize = 128; - - /// Registers a new `Local` in the provided `Global`. - pub(crate) fn register(collector: &Collector) -> LocalHandle { - unsafe { - // Since we dereference no pointers in this block, it is safe to use `unprotected`. - - let local = Owned::new(Self { - entry: Entry::default(), - collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())), - bag: UnsafeCell::new(Bag::new()), - guard_count: Cell::new(0), - handle_count: Cell::new(1), - pin_count: Cell::new(Wrapping(0)), - epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())), - }) - .into_shared(unprotected()); - collector.global.locals.insert(local, unprotected()); - LocalHandle { - local: local.as_raw(), - } - } - } - - /// Returns a reference to the `Global` in which this `Local` resides. - #[inline] - pub(crate) fn global(&self) -> &Global { - &self.collector().global - } - - /// Returns a reference to the `Collector` in which this `Local` resides. 
- #[inline] - pub(crate) fn collector(&self) -> &Collector { - self.collector.with(|c| unsafe { &**c }) - } - - /// Returns `true` if the current participant is pinned. - #[inline] - pub(crate) fn is_pinned(&self) -> bool { - self.guard_count.get() > 0 - } - - /// Adds `deferred` to the thread-local bag. - /// - /// # Safety - /// - /// It should be safe for another thread to execute the given function. - pub(crate) unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) { - let bag = self.bag.with_mut(|b| unsafe { &mut *b }); - - while let Err(d) = unsafe { bag.try_push(deferred) } { - self.global().push_bag(bag, guard); - deferred = d; - } - } - - pub(crate) fn flush(&self, guard: &Guard) { - let bag = self.bag.with_mut(|b| unsafe { &mut *b }); - - if !bag.is_empty() { - self.global().push_bag(bag, guard); - } - - self.global().collect(guard); - } - - /// Pins the `Local`. - #[inline] - pub(crate) fn pin(&self) -> Guard { - let guard = Guard { local: self }; - - let guard_count = self.guard_count.get(); - self.guard_count.set(guard_count.checked_add(1).unwrap()); - - if guard_count == 0 { - let global_epoch = self.global().epoch.load(Ordering::Relaxed); - let new_epoch = global_epoch.pinned(); - - // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence. - // The fence makes sure that any future loads from `Atomic`s will not happen before - // this store. - if cfg!(all( - any(target_arch = "x86", target_arch = "x86_64"), - not(miri) - )) { - // HACK(stjepang): On x86 architectures there are two different ways of executing - // a `SeqCst` fence. - // - // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction. - // 2. `_.compare_exchange(_, _, SeqCst, SeqCst)`, which compiles into a `lock cmpxchg` - // instruction. - // - // Both instructions have the effect of a full barrier, but benchmarks have shown - // that the second one makes pinning faster in this particular case. 
It is not - // clear that this is permitted by the C++ memory model (SC fences work very - // differently from SC accesses), but experimental evidence suggests that this - // works fine. Using inline assembly would be a viable (and correct) alternative, - // but alas, that is not possible on stable Rust. - let current = Epoch::starting(); - let res = self.epoch.compare_exchange( - current, - new_epoch, - Ordering::SeqCst, - Ordering::SeqCst, - ); - debug_assert!(res.is_ok(), "participant was expected to be unpinned"); - // We add a compiler fence to make it less likely for LLVM to do something wrong - // here. Formally, this is not enough to get rid of data races; practically, - // it should go a long way. - atomic::compiler_fence(Ordering::SeqCst); - } else { - self.epoch.store(new_epoch, Ordering::Relaxed); - atomic::fence(Ordering::SeqCst); - } - - // Increment the pin counter. - let count = self.pin_count.get(); - self.pin_count.set(count + Wrapping(1)); - - // After every `PINNINGS_BETWEEN_COLLECT` try advancing the epoch and collecting - // some garbage. - if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 { - self.global().collect(&guard); - } - } - - guard - } - - /// Unpins the `Local`. - #[inline] - pub(crate) fn unpin(&self) { - let guard_count = self.guard_count.get(); - self.guard_count.set(guard_count - 1); - - if guard_count == 1 { - self.epoch.store(Epoch::starting(), Ordering::Release); - - if self.handle_count.get() == 0 { - self.finalize(); - } - } - } - - /// Unpins and then pins the `Local`. - #[inline] - pub(crate) fn repin(&self) { - let guard_count = self.guard_count.get(); - - // Update the local epoch only if there's only one guard. - if guard_count == 1 { - let epoch = self.epoch.load(Ordering::Relaxed); - let global_epoch = self.global().epoch.load(Ordering::Relaxed).pinned(); - - // Update the local epoch only if the global epoch is greater than the local epoch. 
- if epoch != global_epoch { - // We store the new epoch with `Release` because we need to ensure any memory - // accesses from the previous epoch do not leak into the new one. - self.epoch.store(global_epoch, Ordering::Release); - - // However, we don't need a following `SeqCst` fence, because it is safe for memory - // accesses from the new epoch to be executed before updating the local epoch. At - // worse, other threads will see the new epoch late and delay GC slightly. - } - } - } - - /// Increments the handle count. - #[inline] - pub(crate) fn acquire_handle(&self) { - let handle_count = self.handle_count.get(); - debug_assert!(handle_count >= 1); - self.handle_count.set(handle_count + 1); - } - - /// Decrements the handle count. - #[inline] - pub(crate) fn release_handle(&self) { - let guard_count = self.guard_count.get(); - let handle_count = self.handle_count.get(); - debug_assert!(handle_count >= 1); - self.handle_count.set(handle_count - 1); - - if guard_count == 0 && handle_count == 1 { - self.finalize(); - } - } - - /// Removes the `Local` from the global linked list. - #[cold] - fn finalize(&self) { - debug_assert_eq!(self.guard_count.get(), 0); - debug_assert_eq!(self.handle_count.get(), 0); - - // Temporarily increment handle count. This is required so that the following call to `pin` - // doesn't call `finalize` again. - self.handle_count.set(1); - unsafe { - // Pin and move the local bag into the global queue. It's important that `push_bag` - // doesn't defer destruction on any new garbage. - let guard = &self.pin(); - self.global() - .push_bag(self.bag.with_mut(|b| &mut *b), guard); - } - // Revert the handle count back to zero. - self.handle_count.set(0); - - unsafe { - // Take the reference to the `Global` out of this `Local`. Since we're not protected - // by a guard at this time, it's crucial that the reference is read before marking the - // `Local` as deleted. 
- let collector: Collector = ptr::read(self.collector.with(|c| &*(*c))); - - // Mark this node in the linked list as deleted. - self.entry.delete(unprotected()); - - // Finally, drop the reference to the global. Note that this might be the last reference - // to the `Global`. If so, the global data will be destroyed and all deferred functions - // in its queue will be executed. - drop(collector); - } - } -} - -impl IsElement for Local { - fn entry_of(local: &Self) -> &Entry { - // SAFETY: `Local` is `repr(C)` and `entry` is the first field of it. - unsafe { - let entry_ptr = (local as *const Self).cast::(); - &*entry_ptr - } - } - - unsafe fn element_of(entry: &Entry) -> &Self { - // SAFETY: `Local` is `repr(C)` and `entry` is the first field of it. - unsafe { - let local_ptr = (entry as *const Entry).cast::(); - &*local_ptr - } - } - - unsafe fn finalize(entry: &Entry, guard: &Guard) { - unsafe { guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _)) } - } -} - -#[cfg(all(test, not(crossbeam_loom)))] -mod tests { - use std::sync::atomic::AtomicUsize; - - use super::*; - - #[test] - fn check_defer() { - static FLAG: AtomicUsize = AtomicUsize::new(0); - fn set() { - FLAG.store(42, Ordering::Relaxed); - } - - let d = Deferred::new(set); - assert_eq!(FLAG.load(Ordering::Relaxed), 0); - d.call(); - assert_eq!(FLAG.load(Ordering::Relaxed), 42); - } - - #[test] - fn check_bag() { - static FLAG: AtomicUsize = AtomicUsize::new(0); - fn incr() { - FLAG.fetch_add(1, Ordering::Relaxed); - } - - let mut bag = Bag::new(); - assert!(bag.is_empty()); - - for _ in 0..MAX_OBJECTS { - assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() }); - assert!(!bag.is_empty()); - assert_eq!(FLAG.load(Ordering::Relaxed), 0); - } - - let result = unsafe { bag.try_push(Deferred::new(incr)) }; - assert!(result.is_err()); - assert!(!bag.is_empty()); - assert_eq!(FLAG.load(Ordering::Relaxed), 0); - - drop(bag); - assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS); - } -} 
diff --git a/crossbeam-epoch/src/lib.rs b/crossbeam-epoch/src/lib.rs deleted file mode 100644 index 6ecba1b8c..000000000 --- a/crossbeam-epoch/src/lib.rs +++ /dev/null @@ -1,161 +0,0 @@ -//! Epoch-based memory reclamation. -//! -//! An interesting problem concurrent collections deal with comes from the remove operation. -//! Suppose that a thread removes an element from a lock-free map, while another thread is reading -//! that same element at the same time. The first thread must wait until the second thread stops -//! reading the element. Only then it is safe to destruct it. -//! -//! Programming languages that come with garbage collectors solve this problem trivially. The -//! garbage collector will destruct the removed element when no thread can hold a reference to it -//! anymore. -//! -//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an -//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and -//! marked with the current epoch. Every time a thread accesses a collection, it checks the current -//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread -//! can be referencing it anymore. -//! -//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit -//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something -//! users of concurrent collections don't have to worry much about. -//! -//! # Pointers -//! -//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which -//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a -//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely -//! read. -//! -//! # Pinning -//! -//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant -//! 
we declare that any object that gets removed from now on must not be destructed just -//! yet. Garbage collection of newly removed objects is suspended until the participant gets -//! unpinned. -//! -//! # Garbage -//! -//! Objects that get removed from concurrent collections must be stashed away until all currently -//! pinned participants get unpinned. Such objects can be stored into a thread-local or global -//! storage, where they are kept until the right time for their destruction comes. -//! -//! There is a global shared instance of garbage queue. You can [`defer`](Guard::defer) the execution of an -//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data -//! structures may defer the deallocation of an object. -//! -//! # APIs -//! -//! For majority of use cases, just use the default garbage collector by invoking [`pin`]. If you -//! want to create your own garbage collector, use the [`Collector`] API. - -#![no_std] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![warn(missing_docs, unsafe_op_in_unsafe_fn)] - -#[cfg(crossbeam_loom)] -extern crate loom_crate as loom; -#[cfg(feature = "std")] -extern crate std; - -#[cfg(crossbeam_loom)] -#[allow(unused_imports, dead_code)] -mod primitive { - pub(crate) mod cell { - pub(crate) use loom::cell::UnsafeCell; - } - pub(crate) mod sync { - pub(crate) mod atomic { - pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering}; - - // FIXME: loom does not support compiler_fence at the moment. - // https://github.com/tokio-rs/loom/issues/117 - // we use fence as a stand-in for compiler_fence for the time being. - // this may miss some races since fence is stronger than compiler_fence, - // but it's the best we can do for the time being. 
- pub(crate) use self::fence as compiler_fence; - } - pub(crate) use loom::sync::Arc; - } - pub(crate) use loom::thread_local; -} -#[cfg(target_has_atomic = "ptr")] -#[cfg(not(crossbeam_loom))] -#[allow(unused_imports, dead_code)] -mod primitive { - pub(crate) mod cell { - #[derive(Debug)] - #[repr(transparent)] - pub(crate) struct UnsafeCell(::core::cell::UnsafeCell); - - // loom's UnsafeCell has a slightly different API than the standard library UnsafeCell. - // Since we want the rest of the code to be agnostic to whether it's running under loom or - // not, we write this small wrapper that provides the loom-supported API for the standard - // library UnsafeCell. This is also what the loom documentation recommends: - // https://github.com/tokio-rs/loom#handling-loom-api-differences - impl UnsafeCell { - #[inline] - pub(crate) const fn new(data: T) -> Self { - Self(::core::cell::UnsafeCell::new(data)) - } - - #[inline] - pub(crate) fn with(&self, f: impl FnOnce(*const T) -> R) -> R { - f(self.0.get()) - } - - #[inline] - pub(crate) fn with_mut(&self, f: impl FnOnce(*mut T) -> R) -> R { - f(self.0.get()) - } - } - } - pub(crate) mod sync { - #[cfg(feature = "alloc")] - pub(crate) use alloc::sync::Arc; - pub(crate) use core::sync::atomic; - } - - #[cfg(feature = "std")] - pub(crate) use std::thread_local; -} - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -extern crate alloc; - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod atomic; -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod collector; -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod deferred; -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod epoch; -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod guard; -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod internal; -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod sync; - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -pub use crate::{ 
- atomic::{Atomic, CompareExchangeError, Owned, Pointable, Pointer, Shared}, - collector::{Collector, LocalHandle}, - guard::{unprotected, Guard}, -}; - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod sealed { - pub trait Sealed {} -} - -#[cfg(feature = "std")] -mod default; -#[cfg(feature = "std")] -pub use crate::default::{default_collector, is_pinned, pin}; diff --git a/crossbeam-epoch/src/sync/list.rs b/crossbeam-epoch/src/sync/list.rs deleted file mode 100644 index 09e402e17..000000000 --- a/crossbeam-epoch/src/sync/list.rs +++ /dev/null @@ -1,488 +0,0 @@ -//! Lock-free intrusive linked list. -//! -//! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA -//! 2002. - -use core::marker::PhantomData; -use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; - -use crate::{unprotected, Atomic, Guard, Shared}; - -/// An entry in a linked list. -/// -/// An Entry is accessed from multiple threads, so it would be beneficial to put it in a different -/// cache-line than thread-local data in terms of performance. -#[derive(Debug)] -pub(crate) struct Entry { - /// The next entry in the linked list. - /// If the tag is 1, this entry is marked as deleted. - next: Atomic, -} - -/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive -/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance -/// of `Entry`. 
-/// -/// # Example -/// -/// ```ignore -/// struct A { -/// entry: Entry, -/// data: usize, -/// } -/// -/// impl IsElement for A { -/// fn entry_of(a: &A) -> &Entry { -/// let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry; -/// unsafe { &*entry_ptr } -/// } -/// -/// unsafe fn element_of(entry: &Entry) -> &T { -/// let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const T; -/// &*elem_ptr -/// } -/// -/// unsafe fn finalize(entry: &Entry, guard: &Guard) { -/// guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _)); -/// } -/// } -/// ``` -/// -/// This trait is implemented on a type separate from `T` (although it can be just `T`), because -/// one type might be placeable into multiple lists, in which case it would require multiple -/// implementations of `IsElement`. In such cases, each struct implementing `IsElement` -/// represents a distinct `Entry` in `T`. -/// -/// For example, we can insert the following struct into two lists using `entry1` for one -/// and `entry2` for the other: -/// -/// ```ignore -/// struct B { -/// entry1: Entry, -/// entry2: Entry, -/// data: usize, -/// } -/// ``` -/// -pub(crate) trait IsElement { - /// Returns a reference to this element's `Entry`. - fn entry_of(_: &T) -> &Entry; - - /// Given a reference to an element's entry, returns that element. - /// - /// ```ignore - /// let elem = ListElement::new(); - /// assert_eq!(elem.entry_of(), - /// unsafe { ListElement::element_of(elem.entry_of()) } ); - /// ``` - /// - /// # Safety - /// - /// The caller has to guarantee that the `Entry` is called with was retrieved from an instance - /// of the element type (`T`). - unsafe fn element_of(_: &Entry) -> &T; - - /// The function that is called when an entry is unlinked from list. - /// - /// # Safety - /// - /// The caller has to guarantee that the `Entry` is called with was retrieved from an instance - /// of the element type (`T`). 
- unsafe fn finalize(_: &Entry, _: &Guard); -} - -/// A lock-free, intrusive linked list of type `T`. -#[derive(Debug)] -pub(crate) struct List = T> { - /// The head of the linked list. - head: Atomic, - - /// The phantom data for using `T` and `C`. - _marker: PhantomData<(T, C)>, -} - -/// An iterator used for retrieving values from the list. -pub(crate) struct Iter<'g, T, C: IsElement> { - /// The guard that protects the iteration. - guard: &'g Guard, - - /// Pointer from the predecessor to the current entry. - pred: &'g Atomic, - - /// The current entry. - curr: Shared<'g, Entry>, - - /// The list head, needed for restarting iteration. - head: &'g Atomic, - - /// Logically, we store a borrow of an instance of `T` and - /// use the type information from `C`. - _marker: PhantomData<(&'g T, C)>, -} - -/// An error that occurs during iteration over the list. -#[derive(PartialEq, Debug)] -pub(crate) enum IterError { - /// A concurrent thread modified the state of the list at the same place that this iterator - /// was inspecting. Subsequent iteration will restart from the beginning of the list. - Stalled, -} - -impl Default for Entry { - /// Returns the empty entry. - fn default() -> Self { - Self { - next: Atomic::null(), - } - } -} - -impl Entry { - /// Marks this entry as deleted, deferring the actual deallocation to a later iteration. - /// - /// # Safety - /// - /// The entry should be a member of a linked list, and it should not have been deleted. - /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C` - /// is the associated helper for the linked list. - pub(crate) unsafe fn delete(&self, guard: &Guard) { - self.next.fetch_or(1, Release, guard); - } -} - -impl> List { - /// Returns a new, empty linked list. - pub(crate) fn new() -> Self { - Self { - head: Atomic::null(), - _marker: PhantomData, - } - } - - /// Inserts `entry` into the head of the list. 
- /// - /// # Safety - /// - /// You should guarantee that: - /// - /// - `container` is not null - /// - `container` is immovable, e.g. inside an `Owned` - /// - the same `Entry` is not inserted more than once - /// - the inserted object will be removed before the list is dropped - pub(crate) unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) { - // Insert right after head, i.e. at the beginning of the list. - let to = &self.head; - // Get the intrusively stored Entry of the new element to insert. - let entry: &Entry = C::entry_of(unsafe { container.deref() }); - // Make a Shared ptr to that Entry. - let entry_ptr = Shared::from(entry as *const _); - // Read the current successor of where we want to insert. - let mut next = to.load(Relaxed, guard); - - loop { - // Set the Entry of the to-be-inserted element to point to the previous successor of - // `to`. - entry.next.store(next, Relaxed); - match to.compare_exchange_weak(next, entry_ptr, Release, Relaxed, guard) { - Ok(_) => break, - // We lost the race or weak CAS failed spuriously. Update the successor and try - // again. - Err(err) => next = err.current, - } - } - } - - /// Returns an iterator over all objects. - /// - /// # Caveat - /// - /// Every object that is inserted at the moment this function is called and persists at least - /// until the end of iteration will be returned. Since this iterator traverses a lock-free - /// linked list that may be concurrently modified, some additional caveats apply: - /// - /// 1. If a new object is inserted during iteration, it may or may not be returned. - /// 2. If an object is deleted during iteration, it may or may not be returned. - /// 3. The iteration may be aborted when it lost in a race condition. In this case, the winning - /// thread will continue to iterate over the same list. 
- pub(crate) fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> { - Iter { - guard, - pred: &self.head, - curr: self.head.load(Acquire, guard), - head: &self.head, - _marker: PhantomData, - } - } -} - -impl> Drop for List { - fn drop(&mut self) { - unsafe { - let guard = unprotected(); - let mut curr = self.head.load(Relaxed, guard); - while let Some(c) = curr.as_ref() { - let succ = c.next.load(Relaxed, guard); - // Verify that all elements have been removed from the list. - assert_eq!(succ.tag(), 1); - - C::finalize(curr.deref(), guard); - curr = succ; - } - } - } -} - -impl<'g, T: 'g, C: IsElement> Iterator for Iter<'g, T, C> { - type Item = Result<&'g T, IterError>; - - fn next(&mut self) -> Option { - while let Some(c) = unsafe { self.curr.as_ref() } { - let succ = c.next.load(Acquire, self.guard); - - if succ.tag() == 1 { - // This entry was removed. Try unlinking it from the list. - let succ = succ.with_tag(0); - - // The tag should always be zero, because removing a node after a logically deleted - // node leaves the list in an invalid state. - debug_assert!(self.curr.tag() == 0); - - // Try to unlink `curr` from the list, and get the new value of `self.pred`. - let succ = match self - .pred - .compare_exchange(self.curr, succ, Acquire, Acquire, self.guard) - { - Ok(_) => { - // We succeeded in unlinking `curr`, so we have to schedule - // deallocation. Deferred drop is okay, because `list.delete()` can only be - // called if `T: 'static`. - unsafe { - C::finalize(self.curr.deref(), self.guard); - } - - // `succ` is the new value of `self.pred`. - succ - } - Err(e) => { - // `e.current` is the current value of `self.pred`. - e.current - } - }; - - // If the predecessor node is already marked as deleted, we need to restart from - // `head`. - if succ.tag() != 0 { - self.pred = self.head; - self.curr = self.head.load(Acquire, self.guard); - - return Some(Err(IterError::Stalled)); - } - - // Move over the removed by only advancing `curr`, not `pred`. 
- self.curr = succ; - continue; - } - - // Move one step forward. - self.pred = &c.next; - self.curr = succ; - - return Some(Ok(unsafe { C::element_of(c) })); - } - - // We reached the end of the list. - None - } -} - -#[cfg(all(test, not(crossbeam_loom)))] -mod tests { - use super::*; - use crate::{Collector, Owned}; - use crossbeam_utils::thread; - use std::sync::Barrier; - use std::vec::Vec; - - impl IsElement for Entry { - fn entry_of(entry: &Self) -> &Entry { - entry - } - - unsafe fn element_of(entry: &Entry) -> &Self { - entry - } - - unsafe fn finalize(entry: &Entry, guard: &Guard) { - unsafe { guard.defer_destroy(Shared::from(Self::element_of(entry) as *const _)) } - } - } - - /// Checks whether the list retains inserted elements - /// and returns them in the correct order. - #[test] - fn insert() { - let collector = Collector::new(); - let handle = collector.register(); - let guard = handle.pin(); - - let l: List = List::new(); - - let e1 = Owned::new(Entry::default()).into_shared(&guard); - let e2 = Owned::new(Entry::default()).into_shared(&guard); - let e3 = Owned::new(Entry::default()).into_shared(&guard); - - unsafe { - l.insert(e1, &guard); - l.insert(e2, &guard); - l.insert(e3, &guard); - } - - let mut iter = l.iter(&guard); - let maybe_e3 = iter.next(); - assert!(maybe_e3.is_some()); - assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); - let maybe_e2 = iter.next(); - assert!(maybe_e2.is_some()); - assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw()); - let maybe_e1 = iter.next(); - assert!(maybe_e1.is_some()); - assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); - assert!(iter.next().is_none()); - - unsafe { - e1.as_ref().unwrap().delete(&guard); - e2.as_ref().unwrap().delete(&guard); - e3.as_ref().unwrap().delete(&guard); - } - } - - /// Checks whether elements can be removed from the list and whether - /// the correct elements are removed. 
- #[test] - fn delete() { - let collector = Collector::new(); - let handle = collector.register(); - let guard = handle.pin(); - - let l: List = List::new(); - - let e1 = Owned::new(Entry::default()).into_shared(&guard); - let e2 = Owned::new(Entry::default()).into_shared(&guard); - let e3 = Owned::new(Entry::default()).into_shared(&guard); - unsafe { - l.insert(e1, &guard); - l.insert(e2, &guard); - l.insert(e3, &guard); - e2.as_ref().unwrap().delete(&guard); - } - - let mut iter = l.iter(&guard); - let maybe_e3 = iter.next(); - assert!(maybe_e3.is_some()); - assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw()); - let maybe_e1 = iter.next(); - assert!(maybe_e1.is_some()); - assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw()); - assert!(iter.next().is_none()); - - unsafe { - e1.as_ref().unwrap().delete(&guard); - e3.as_ref().unwrap().delete(&guard); - } - - let mut iter = l.iter(&guard); - assert!(iter.next().is_none()); - } - - const THREADS: usize = 8; - const ITERS: usize = 512; - - /// Contends the list on insert and delete operations to make sure they can run concurrently. - #[test] - fn insert_delete_multi() { - let collector = Collector::new(); - - let l: List = List::new(); - let b = Barrier::new(THREADS); - - thread::scope(|s| { - for _ in 0..THREADS { - s.spawn(|_| { - b.wait(); - - let handle = collector.register(); - let guard: Guard = handle.pin(); - let mut v = Vec::with_capacity(ITERS); - - for _ in 0..ITERS { - let e = Owned::new(Entry::default()).into_shared(&guard); - v.push(e); - unsafe { - l.insert(e, &guard); - } - } - - for e in v { - unsafe { - e.as_ref().unwrap().delete(&guard); - } - } - }); - } - }) - .unwrap(); - - let handle = collector.register(); - let guard = handle.pin(); - - let mut iter = l.iter(&guard); - assert!(iter.next().is_none()); - } - - /// Contends the list on iteration to make sure that it can be iterated over concurrently. 
- #[test] - fn iter_multi() { - let collector = Collector::new(); - - let l: List = List::new(); - let b = Barrier::new(THREADS); - - thread::scope(|s| { - for _ in 0..THREADS { - s.spawn(|_| { - b.wait(); - - let handle = collector.register(); - let guard: Guard = handle.pin(); - let mut v = Vec::with_capacity(ITERS); - - for _ in 0..ITERS { - let e = Owned::new(Entry::default()).into_shared(&guard); - v.push(e); - unsafe { - l.insert(e, &guard); - } - } - - let mut iter = l.iter(&guard); - for _ in 0..ITERS { - assert!(iter.next().is_some()); - } - - for e in v { - unsafe { - e.as_ref().unwrap().delete(&guard); - } - } - }); - } - }) - .unwrap(); - - let handle = collector.register(); - let guard = handle.pin(); - - let mut iter = l.iter(&guard); - assert!(iter.next().is_none()); - } -} diff --git a/crossbeam-epoch/src/sync/mod.rs b/crossbeam-epoch/src/sync/mod.rs deleted file mode 100644 index 08981be25..000000000 --- a/crossbeam-epoch/src/sync/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Synchronization primitives. - -pub(crate) mod list; -#[cfg(feature = "std")] -#[cfg(not(crossbeam_loom))] -pub(crate) mod once_lock; -pub(crate) mod queue; diff --git a/crossbeam-epoch/src/sync/once_lock.rs b/crossbeam-epoch/src/sync/once_lock.rs deleted file mode 120000 index 6e7443034..000000000 --- a/crossbeam-epoch/src/sync/once_lock.rs +++ /dev/null @@ -1 +0,0 @@ -../../../crossbeam-utils/src/sync/once_lock.rs \ No newline at end of file diff --git a/crossbeam-epoch/src/sync/queue.rs b/crossbeam-epoch/src/sync/queue.rs deleted file mode 100644 index bc020e9a1..000000000 --- a/crossbeam-epoch/src/sync/queue.rs +++ /dev/null @@ -1,469 +0,0 @@ -//! Michael-Scott lock-free queue. -//! -//! Usable with any number of producers and consumers. -//! -//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue -//! Algorithms. PODC 1996. -//! -//! Simon Doherty, Lindsay Groves, Victor Luchangco, and Mark Moir. 2004b. Formal Verification of a -//! 
Practical Lock-Free Queue Algorithm. - -use core::mem::MaybeUninit; -use core::sync::atomic::Ordering::{Acquire, Relaxed, Release}; - -use crossbeam_utils::CachePadded; - -use crate::{unprotected, Atomic, Guard, Owned, Shared}; - -// The representation here is a singly-linked list, with a sentinel node at the front. In general -// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or -// all `Blocked` (requests for data from blocked threads). -#[derive(Debug)] -pub(crate) struct Queue { - head: CachePadded>>, - tail: CachePadded>>, -} - -struct Node { - /// The slot in which a value of type `T` can be stored. - /// - /// The type of `data` is `MaybeUninit` because a `Node` doesn't always contain a `T`. - /// For example, the sentinel node in a queue never contains a value: its slot is always empty. - /// Other nodes start their life with a push operation and contain a value until it gets popped - /// out. After that such empty nodes get added to the collector for destruction. - data: MaybeUninit, - - next: Atomic>, -} - -// Any particular `T` should never be accessed concurrently, so no need for `Sync`. -unsafe impl Sync for Queue {} -unsafe impl Send for Queue {} - -impl Queue { - /// Create a new, empty queue. - pub(crate) fn new() -> Self { - let q = Self { - head: CachePadded::new(Atomic::null()), - tail: CachePadded::new(Atomic::null()), - }; - let sentinel = Owned::new(Node { - data: MaybeUninit::uninit(), - next: Atomic::null(), - }); - unsafe { - let guard = unprotected(); - let sentinel = sentinel.into_shared(guard); - q.head.store(sentinel, Relaxed); - q.tail.store(sentinel, Relaxed); - q - } - } - - /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on - /// success. The queue's `tail` pointer may be updated. - #[inline(always)] - fn push_internal( - &self, - onto: Shared<'_, Node>, - new: Shared<'_, Node>, - guard: &Guard, - ) -> bool { - // is `onto` the actual tail? 
- let o = unsafe { onto.deref() }; - let next = o.next.load(Acquire, guard); - if unsafe { next.as_ref().is_some() } { - // if not, try to "help" by moving the tail pointer forward - let _ = self - .tail - .compare_exchange(onto, next, Release, Relaxed, guard); - false - } else { - // looks like the actual tail; attempt to link in `n` - let result = o - .next - .compare_exchange(Shared::null(), new, Release, Relaxed, guard) - .is_ok(); - if result { - // try to move the tail pointer forward - let _ = self - .tail - .compare_exchange(onto, new, Release, Relaxed, guard); - } - result - } - } - - /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`. - pub(crate) fn push(&self, t: T, guard: &Guard) { - let new = Owned::new(Node { - data: MaybeUninit::new(t), - next: Atomic::null(), - }); - let new = Owned::into_shared(new, guard); - - loop { - // We push onto the tail, so we'll start optimistically by looking there first. - let tail = self.tail.load(Acquire, guard); - - // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed. - if self.push_internal(tail, new, guard) { - break; - } - } - } - - /// Attempts to pop a data node. `Ok(None)` if queue is empty; `Err(())` if lost race to pop. - #[inline(always)] - fn pop_internal(&self, guard: &Guard) -> Result, ()> { - let head = self.head.load(Acquire, guard); - let h = unsafe { head.deref() }; - let next = h.next.load(Acquire, guard); - match unsafe { next.as_ref() } { - Some(n) => unsafe { - self.head - .compare_exchange(head, next, Release, Relaxed, guard) - .map(|_| { - let tail = self.tail.load(Relaxed, guard); - // Advance the tail so that we don't retire a pointer to a reachable node. 
- if head == tail { - let _ = self - .tail - .compare_exchange(tail, next, Release, Relaxed, guard); - } - guard.defer_destroy(head); - Some(n.data.assume_init_read()) - }) - .map_err(|_| ()) - }, - None => Ok(None), - } - } - - /// Attempts to pop a data node, if the data satisfies the given condition. `Ok(None)` if queue - /// is empty or the data does not satisfy the condition; `Err(())` if lost race to pop. - #[inline(always)] - fn pop_if_internal(&self, condition: F, guard: &Guard) -> Result, ()> - where - T: Sync, - F: Fn(&T) -> bool, - { - let head = self.head.load(Acquire, guard); - let h = unsafe { head.deref() }; - let next = h.next.load(Acquire, guard); - match unsafe { next.as_ref() } { - Some(n) if condition(unsafe { &*n.data.as_ptr() }) => unsafe { - self.head - .compare_exchange(head, next, Release, Relaxed, guard) - .map(|_| { - let tail = self.tail.load(Relaxed, guard); - // Advance the tail so that we don't retire a pointer to a reachable node. - if head == tail { - let _ = self - .tail - .compare_exchange(tail, next, Release, Relaxed, guard); - } - guard.defer_destroy(head); - Some(n.data.assume_init_read()) - }) - .map_err(|_| ()) - }, - None | Some(_) => Ok(None), - } - } - - /// Attempts to dequeue from the front. - /// - /// Returns `None` if the queue is observed to be empty. - pub(crate) fn try_pop(&self, guard: &Guard) -> Option { - loop { - if let Ok(head) = self.pop_internal(guard) { - return head; - } - } - } - - /// Attempts to dequeue from the front, if the item satisfies the given condition. - /// - /// Returns `None` if the queue is observed to be empty, or the head does not satisfy the given - /// condition. 
- pub(crate) fn try_pop_if(&self, condition: F, guard: &Guard) -> Option - where - T: Sync, - F: Fn(&T) -> bool, - { - loop { - if let Ok(head) = self.pop_if_internal(&condition, guard) { - return head; - } - } - } -} - -impl Drop for Queue { - fn drop(&mut self) { - unsafe { - let guard = unprotected(); - - while self.try_pop(guard).is_some() {} - - // Destroy the remaining sentinel node. - let sentinel = self.head.load(Relaxed, guard); - drop(sentinel.into_owned()); - } - } -} - -#[cfg(all(test, not(crossbeam_loom)))] -mod test { - use super::*; - use crate::pin; - use crossbeam_utils::thread; - use std::vec; - - struct Queue { - queue: super::Queue, - } - - impl Queue { - pub(crate) fn new() -> Self { - Self { - queue: super::Queue::new(), - } - } - - pub(crate) fn push(&self, t: T) { - let guard = &pin(); - self.queue.push(t, guard); - } - - pub(crate) fn is_empty(&self) -> bool { - let guard = &pin(); - let head = self.queue.head.load(Acquire, guard); - let h = unsafe { head.deref() }; - h.next.load(Acquire, guard).is_null() - } - - pub(crate) fn try_pop(&self) -> Option { - let guard = &pin(); - self.queue.try_pop(guard) - } - - pub(crate) fn pop(&self) -> T { - loop { - match self.try_pop() { - None => continue, - Some(t) => return t, - } - } - } - } - - #[cfg(miri)] - const CONC_COUNT: i64 = 1000; - #[cfg(not(miri))] - const CONC_COUNT: i64 = 1000000; - - #[test] - fn push_try_pop_1() { - let q: Queue = Queue::new(); - assert!(q.is_empty()); - q.push(37); - assert!(!q.is_empty()); - assert_eq!(q.try_pop(), Some(37)); - assert!(q.is_empty()); - } - - #[test] - fn push_try_pop_2() { - let q: Queue = Queue::new(); - assert!(q.is_empty()); - q.push(37); - q.push(48); - assert_eq!(q.try_pop(), Some(37)); - assert!(!q.is_empty()); - assert_eq!(q.try_pop(), Some(48)); - assert!(q.is_empty()); - } - - #[test] - fn push_try_pop_many_seq() { - let q: Queue = Queue::new(); - assert!(q.is_empty()); - for i in 0..200 { - q.push(i) - } - assert!(!q.is_empty()); - for i 
in 0..200 { - assert_eq!(q.try_pop(), Some(i)); - } - assert!(q.is_empty()); - } - - #[test] - fn push_pop_1() { - let q: Queue = Queue::new(); - assert!(q.is_empty()); - q.push(37); - assert!(!q.is_empty()); - assert_eq!(q.pop(), 37); - assert!(q.is_empty()); - } - - #[test] - fn push_pop_2() { - let q: Queue = Queue::new(); - q.push(37); - q.push(48); - assert_eq!(q.pop(), 37); - assert_eq!(q.pop(), 48); - } - - #[test] - fn push_pop_many_seq() { - let q: Queue = Queue::new(); - assert!(q.is_empty()); - for i in 0..200 { - q.push(i) - } - assert!(!q.is_empty()); - for i in 0..200 { - assert_eq!(q.pop(), i); - } - assert!(q.is_empty()); - } - - #[test] - fn push_try_pop_many_spsc() { - let q: Queue = Queue::new(); - assert!(q.is_empty()); - - thread::scope(|scope| { - scope.spawn(|_| { - let mut next = 0; - - while next < CONC_COUNT { - if let Some(elem) = q.try_pop() { - assert_eq!(elem, next); - next += 1; - } - } - }); - - for i in 0..CONC_COUNT { - q.push(i) - } - }) - .unwrap(); - } - - #[test] - fn push_try_pop_many_spmc() { - fn recv(_t: i32, q: &Queue) { - let mut cur = -1; - for _i in 0..CONC_COUNT { - if let Some(elem) = q.try_pop() { - assert!(elem > cur); - cur = elem; - - if cur == CONC_COUNT - 1 { - break; - } - } - } - } - - let q: Queue = Queue::new(); - assert!(q.is_empty()); - thread::scope(|scope| { - for i in 0..3 { - let q = &q; - scope.spawn(move |_| recv(i, q)); - } - - scope.spawn(|_| { - for i in 0..CONC_COUNT { - q.push(i); - } - }); - }) - .unwrap(); - } - - #[test] - fn push_try_pop_many_mpmc() { - enum LR { - Left(i64), - Right(i64), - } - - let q: Queue = Queue::new(); - assert!(q.is_empty()); - - thread::scope(|scope| { - for _t in 0..2 { - scope.spawn(|_| { - for i in CONC_COUNT - 1..CONC_COUNT { - q.push(LR::Left(i)) - } - }); - scope.spawn(|_| { - for i in CONC_COUNT - 1..CONC_COUNT { - q.push(LR::Right(i)) - } - }); - scope.spawn(|_| { - let mut vl = vec![]; - let mut vr = vec![]; - for _i in 0..CONC_COUNT { - match q.try_pop() { 
- Some(LR::Left(x)) => vl.push(x), - Some(LR::Right(x)) => vr.push(x), - _ => {} - } - } - - let mut vl2 = vl.clone(); - let mut vr2 = vr.clone(); - vl2.sort_unstable(); - vr2.sort_unstable(); - - assert_eq!(vl, vl2); - assert_eq!(vr, vr2); - }); - } - }) - .unwrap(); - } - - #[test] - fn push_pop_many_spsc() { - let q: Queue = Queue::new(); - - thread::scope(|scope| { - scope.spawn(|_| { - let mut next = 0; - while next < CONC_COUNT { - assert_eq!(q.pop(), next); - next += 1; - } - }); - - for i in 0..CONC_COUNT { - q.push(i) - } - }) - .unwrap(); - assert!(q.is_empty()); - } - - #[test] - fn is_empty_dont_pop() { - let q: Queue = Queue::new(); - q.push(20); - q.push(20); - assert!(!q.is_empty()); - assert!(!q.is_empty()); - assert!(q.try_pop().is_some()); - } -} diff --git a/crossbeam-epoch/tests/loom.rs b/crossbeam-epoch/tests/loom.rs deleted file mode 100644 index b56531844..000000000 --- a/crossbeam-epoch/tests/loom.rs +++ /dev/null @@ -1,157 +0,0 @@ -#![cfg(crossbeam_loom)] - -use crossbeam_epoch as epoch; -use loom_crate as loom; - -use epoch::*; -use epoch::{Atomic, Owned}; -use loom::sync::atomic::Ordering::{self, Acquire, Relaxed, Release}; -use loom::sync::Arc; -use loom::thread::spawn; -use std::mem::ManuallyDrop; -use std::ptr; - -#[test] -fn it_works() { - loom::model(|| { - let collector = Collector::new(); - let item: Atomic = Atomic::from(Owned::new(String::from("boom"))); - let item2 = item.clone(); - let collector2 = collector.clone(); - let guard = collector.register().pin(); - - let jh = loom::thread::spawn(move || { - let guard = collector2.register().pin(); - guard.defer(move || { - // this isn't really safe, since other threads may still have pointers to the - // value, but in this limited test scenario it's okay, since we know the test won't - // access item after all the pins are released. 
- let mut item = unsafe { item2.into_owned() }; - // mutate it as a second measure to make sure the assert_eq below would fail - item.retain(|c| c == 'o'); - drop(item); - }); - }); - - let item = item.load(Ordering::SeqCst, &guard); - // we pinned strictly before the call to defer_destroy, - // so item cannot have been dropped yet - assert_eq!(*unsafe { item.deref() }, "boom"); - drop(guard); - - jh.join().unwrap(); - - drop(collector); - }) -} - -#[test] -fn treiber_stack() { - /// Treiber's lock-free stack. - /// - /// Usable with any number of producers and consumers. - #[derive(Debug)] - struct TreiberStack { - head: Atomic>, - } - - #[derive(Debug)] - struct Node { - data: ManuallyDrop, - next: Atomic>, - } - - impl TreiberStack { - /// Creates a new, empty stack. - fn new() -> Self { - Self { - head: Atomic::null(), - } - } - - /// Pushes a value on top of the stack. - fn push(&self, t: T) { - let mut n = Owned::new(Node { - data: ManuallyDrop::new(t), - next: Atomic::null(), - }); - - let guard = epoch::pin(); - - loop { - let head = self.head.load(Relaxed, &guard); - n.next.store(head, Relaxed); - - match self - .head - .compare_exchange(head, n, Release, Relaxed, &guard) - { - Ok(_) => break, - Err(e) => n = e.new, - } - } - } - - /// Attempts to pop the top element from the stack. - /// - /// Returns `None` if the stack is empty. - fn pop(&self) -> Option { - let guard = epoch::pin(); - loop { - let head = self.head.load(Acquire, &guard); - - match unsafe { head.as_ref() } { - Some(h) => { - let next = h.next.load(Relaxed, &guard); - - if self - .head - .compare_exchange(head, next, Relaxed, Relaxed, &guard) - .is_ok() - { - unsafe { - guard.defer_destroy(head); - return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data))); - } - } - } - None => return None, - } - } - } - - /// Returns `true` if the stack is empty. 
- fn is_empty(&self) -> bool { - let guard = epoch::pin(); - self.head.load(Acquire, &guard).is_null() - } - } - - impl Drop for TreiberStack { - fn drop(&mut self) { - while self.pop().is_some() {} - } - } - - loom::model(|| { - let stack1 = Arc::new(TreiberStack::new()); - let stack2 = Arc::clone(&stack1); - - // use 5 since it's greater than the 4 used for the sanitize feature - let jh = spawn(move || { - for i in 0..5 { - stack2.push(i); - assert!(stack2.pop().is_some()); - } - }); - - for i in 0..5 { - stack1.push(i); - assert!(stack1.pop().is_some()); - } - - jh.join().unwrap(); - assert!(stack1.pop().is_none()); - assert!(stack1.is_empty()); - }); -} diff --git a/crossbeam-queue/CHANGELOG.md b/crossbeam-queue/CHANGELOG.md deleted file mode 100644 index e19a63926..000000000 --- a/crossbeam-queue/CHANGELOG.md +++ /dev/null @@ -1,88 +0,0 @@ -# Version 0.3.12 - -- Fix stack overflow when pushing large value to `SegQueue`. (#1146, #1147, #1159) - -# Version 0.3.11 - -- Remove dependency on `cfg-if`. (#1072) - -# Version 0.3.10 - -- Relax the minimum supported Rust version to 1.60. (#1056) -- Implement `UnwindSafe` and `RefUnwindSafe` for `ArrayQueue` and `SegQueue`. (#1053) -- Optimize `Drop` implementation of `ArrayQueue`. (#1057) - -# Version 0.3.9 - -- Bump the minimum supported Rust version to 1.61. (#1037) -- Improve support for targets without atomic CAS. (#1037) -- Remove build script. (#1037) - -# Version 0.3.8 - -- Fix build script bug introduced in 0.3.7. (#932) - -# Version 0.3.7 - -**Note:** This release has been yanked due to regression fixed in 0.3.8. - -- Improve support for custom targets. (#922) - -# Version 0.3.6 - -- Bump the minimum supported Rust version to 1.38. (#877) - -# Version 0.3.5 - -- Add `ArrayQueue::force_push`. (#789) - -# Version 0.3.4 - -- Implement `IntoIterator` for `ArrayQueue` and `SegQueue`. (#772) - -# Version 0.3.3 - -- Fix stacked borrows violation in `ArrayQueue` when `-Zmiri-tag-raw-pointers` is enabled. 
(#763) - -# Version 0.3.2 - -- Support targets that do not have atomic CAS on stable Rust. (#698) - -# Version 0.3.1 - -- Make `SegQueue::new` const fn. (#584) -- Change license to "MIT OR Apache-2.0". - -# Version 0.3.0 - -- Bump the minimum supported Rust version to 1.36. -- Remove `PushError` and `PopError`. - -# Version 0.2.3 - -- Fix bug in release (yanking 0.2.2) - -# Version 0.2.2 - -- Fix unsoundness issues by adopting `MaybeUninit`. (#458) - -# Version 0.2.1 - -- Add `no_std` support. - -# Version 0.2.0 - -- Bump the minimum required version to 1.28. -- Bump `crossbeam-utils` to `0.7`. - -# Version 0.1.2 - -- Update `crossbeam-utils` to `0.6.5`. - -# Version 0.1.1 - -- Update `crossbeam-utils` to `0.6.4`. - -# Version 0.1.0 - -- Initial version with `ArrayQueue` and `SegQueue`. diff --git a/crossbeam-queue/Cargo.toml b/crossbeam-queue/Cargo.toml deleted file mode 100644 index b3d268f13..000000000 --- a/crossbeam-queue/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "crossbeam-queue" -# When publishing a new version: -# - Update CHANGELOG.md -# - Update README.md (when increasing major or minor version) -# - Run './tools/publish.sh crossbeam-queue ' -version = "0.3.12" -edition = "2021" -rust-version = "1.60" -license = "MIT OR Apache-2.0" -repository = "https://github.com/crossbeam-rs/crossbeam" -homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-queue" -description = "Concurrent queues" -keywords = ["queue", "mpmc", "lock-free", "producer", "consumer"] -categories = ["concurrency", "data-structures", "no-std"] - -[features] -default = ["std"] - -# Enable to use APIs that require `std`. -# This is enabled by default. -std = ["alloc", "crossbeam-utils/std"] - -# Enable to use APIs that require `alloc`. -# This is enabled by default and also enabled if the `std` feature is enabled. -# -# NOTE: Disabling both `std` *and* `alloc` features is not supported yet. 
-alloc = [] - -[dependencies] -crossbeam-utils = { version = "0.8.18", path = "../crossbeam-utils", default-features = false } - -[dev-dependencies] -rand = "0.8" - -[lints] -workspace = true diff --git a/crossbeam-queue/LICENSE-APACHE b/crossbeam-queue/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/crossbeam-queue/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/crossbeam-queue/LICENSE-MIT b/crossbeam-queue/LICENSE-MIT deleted file mode 100644 index 068d491fd..000000000 --- a/crossbeam-queue/LICENSE-MIT +++ /dev/null @@ -1,27 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 The Crossbeam Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/crossbeam-queue/README.md b/crossbeam-queue/README.md deleted file mode 100644 index fff84dedf..000000000 --- a/crossbeam-queue/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Crossbeam Queue - -[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( -https://github.com/crossbeam-rs/crossbeam/actions) -[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-queue#license) -[![Cargo](https://img.shields.io/crates/v/crossbeam-queue.svg)]( -https://crates.io/crates/crossbeam-queue) -[![Documentation](https://docs.rs/crossbeam-queue/badge.svg)]( -https://docs.rs/crossbeam-queue) -[![Rust 1.60+](https://img.shields.io/badge/rust-1.60+-lightgray.svg)]( -https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) - -This crate provides concurrent queues that can be shared among threads: - -* [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction. -* [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand. - -Everything in this crate can be used in `no_std` environments, provided that `alloc` feature is -enabled. - -[`ArrayQueue`]: https://docs.rs/crossbeam-queue/latest/crossbeam_queue/struct.ArrayQueue.html -[`SegQueue`]: https://docs.rs/crossbeam-queue/latest/crossbeam_queue/struct.SegQueue.html - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -crossbeam-queue = "0.3" -``` - -## Compatibility - -Crossbeam Queue supports stable Rust releases going back at least six months, -and every time the minimum supported Rust version is increased, a new minor -version is released. Currently, the minimum supported Rust version is 1.60. 
- -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -#### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff --git a/crossbeam-queue/src/array_queue.rs b/crossbeam-queue/src/array_queue.rs deleted file mode 100644 index 52ea05541..000000000 --- a/crossbeam-queue/src/array_queue.rs +++ /dev/null @@ -1,538 +0,0 @@ -//! The implementation is based on Dmitry Vyukov's bounded MPMC queue. -//! -//! Source: -//! - - -use alloc::boxed::Box; -use core::cell::UnsafeCell; -use core::fmt; -use core::mem::{self, MaybeUninit}; -use core::panic::{RefUnwindSafe, UnwindSafe}; -use core::sync::atomic::{self, AtomicUsize, Ordering}; - -use crossbeam_utils::{Backoff, CachePadded}; - -/// A slot in a queue. -struct Slot { - /// The current stamp. - /// - /// If the stamp equals the tail, this node will be next written to. If it equals head + 1, - /// this node will be next read from. - stamp: AtomicUsize, - - /// The value in this slot. - value: UnsafeCell>, -} - -/// A bounded multi-producer multi-consumer queue. -/// -/// This queue allocates a fixed-capacity buffer on construction, which is used to store pushed -/// elements. The queue cannot hold more elements than the buffer allows. Attempting to push an -/// element into a full queue will fail. Alternatively, [`force_push`] makes it possible for -/// this queue to be used as a ring-buffer. Having a buffer allocated upfront makes this queue -/// a bit faster than [`SegQueue`]. 
-/// -/// [`force_push`]: ArrayQueue::force_push -/// [`SegQueue`]: super::SegQueue -/// -/// # Examples -/// -/// ``` -/// use crossbeam_queue::ArrayQueue; -/// -/// let q = ArrayQueue::new(2); -/// -/// assert_eq!(q.push('a'), Ok(())); -/// assert_eq!(q.push('b'), Ok(())); -/// assert_eq!(q.push('c'), Err('c')); -/// assert_eq!(q.pop(), Some('a')); -/// ``` -pub struct ArrayQueue { - /// The head of the queue. - /// - /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a - /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. - /// - /// Elements are popped from the head of the queue. - head: CachePadded, - - /// The tail of the queue. - /// - /// This value is a "stamp" consisting of an index into the buffer and a lap, but packed into a - /// single `usize`. The lower bits represent the index, while the upper bits represent the lap. - /// - /// Elements are pushed into the tail of the queue. - tail: CachePadded, - - /// The buffer holding slots. - buffer: Box<[Slot]>, - - /// A stamp with the value of `{ lap: 1, index: 0 }`. - one_lap: usize, -} - -unsafe impl Sync for ArrayQueue {} -unsafe impl Send for ArrayQueue {} - -impl UnwindSafe for ArrayQueue {} -impl RefUnwindSafe for ArrayQueue {} - -impl ArrayQueue { - /// Creates a new bounded queue with the given capacity. - /// - /// # Panics - /// - /// Panics if the capacity is zero. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::::new(100); - /// ``` - pub fn new(cap: usize) -> Self { - assert!(cap > 0, "capacity must be non-zero"); - - // Head is initialized to `{ lap: 0, index: 0 }`. - // Tail is initialized to `{ lap: 0, index: 0 }`. - let head = 0; - let tail = 0; - - // Allocate a buffer of `cap` slots initialized - // with stamps. - let buffer: Box<[Slot]> = (0..cap) - .map(|i| { - // Set the stamp to `{ lap: 0, index: i }`. 
- Slot { - stamp: AtomicUsize::new(i), - value: UnsafeCell::new(MaybeUninit::uninit()), - } - }) - .collect(); - - // One lap is the smallest power of two greater than `cap`. - let one_lap = (cap + 1).next_power_of_two(); - - Self { - buffer, - one_lap, - head: CachePadded::new(AtomicUsize::new(head)), - tail: CachePadded::new(AtomicUsize::new(tail)), - } - } - - fn push_or_else(&self, mut value: T, f: F) -> Result<(), T> - where - F: Fn(T, usize, usize, &Slot) -> Result, - { - let backoff = Backoff::new(); - let mut tail = self.tail.load(Ordering::Relaxed); - - loop { - // Deconstruct the tail. - let index = tail & (self.one_lap - 1); - let lap = tail & !(self.one_lap - 1); - - let new_tail = if index + 1 < self.capacity() { - // Same lap, incremented index. - // Set to `{ lap: lap, index: index + 1 }`. - tail + 1 - } else { - // One lap forward, index wraps around to zero. - // Set to `{ lap: lap.wrapping_add(1), index: 0 }`. - lap.wrapping_add(self.one_lap) - }; - - // Inspect the corresponding slot. - debug_assert!(index < self.buffer.len()); - let slot = unsafe { self.buffer.get_unchecked(index) }; - let stamp = slot.stamp.load(Ordering::Acquire); - - // If the tail and the stamp match, we may attempt to push. - if tail == stamp { - // Try moving the tail. - match self.tail.compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Relaxed, - ) { - Ok(_) => { - // Write the value into the slot and update the stamp. - unsafe { - slot.value.get().write(MaybeUninit::new(value)); - } - slot.stamp.store(tail + 1, Ordering::Release); - return Ok(()); - } - Err(t) => { - tail = t; - backoff.spin(); - } - } - } else if stamp.wrapping_add(self.one_lap) == tail + 1 { - atomic::fence(Ordering::SeqCst); - value = f(value, tail, new_tail, slot)?; - backoff.spin(); - tail = self.tail.load(Ordering::Relaxed); - } else { - // Snooze because we need to wait for the stamp to get updated. 
- backoff.snooze(); - tail = self.tail.load(Ordering::Relaxed); - } - } - } - - /// Attempts to push an element into the queue. - /// - /// If the queue is full, the element is returned back as an error. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::new(1); - /// - /// assert_eq!(q.push(10), Ok(())); - /// assert_eq!(q.push(20), Err(20)); - /// ``` - pub fn push(&self, value: T) -> Result<(), T> { - self.push_or_else(value, |v, tail, _, _| { - let head = self.head.load(Ordering::Relaxed); - - // If the head lags one lap behind the tail as well... - if head.wrapping_add(self.one_lap) == tail { - // ...then the queue is full. - Err(v) - } else { - Ok(v) - } - }) - } - - /// Pushes an element into the queue, replacing the oldest element if necessary. - /// - /// If the queue is full, the oldest element is replaced and returned, - /// otherwise `None` is returned. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::new(2); - /// - /// assert_eq!(q.force_push(10), None); - /// assert_eq!(q.force_push(20), None); - /// assert_eq!(q.force_push(30), Some(10)); - /// assert_eq!(q.pop(), Some(20)); - /// ``` - pub fn force_push(&self, value: T) -> Option { - self.push_or_else(value, |v, tail, new_tail, slot| { - let head = tail.wrapping_sub(self.one_lap); - let new_head = new_tail.wrapping_sub(self.one_lap); - - // Try moving the head. - if self - .head - .compare_exchange_weak(head, new_head, Ordering::SeqCst, Ordering::Relaxed) - .is_ok() - { - // Move the tail. - self.tail.store(new_tail, Ordering::SeqCst); - - // Swap the previous value. - let old = unsafe { slot.value.get().replace(MaybeUninit::new(v)).assume_init() }; - - // Update the stamp. - slot.stamp.store(tail + 1, Ordering::Release); - - Err(old) - } else { - Ok(v) - } - }) - .err() - } - - /// Attempts to pop an element from the queue. - /// - /// If the queue is empty, `None` is returned. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::new(1); - /// assert_eq!(q.push(10), Ok(())); - /// - /// assert_eq!(q.pop(), Some(10)); - /// assert!(q.pop().is_none()); - /// ``` - pub fn pop(&self) -> Option { - let backoff = Backoff::new(); - let mut head = self.head.load(Ordering::Relaxed); - - loop { - // Deconstruct the head. - let index = head & (self.one_lap - 1); - let lap = head & !(self.one_lap - 1); - - // Inspect the corresponding slot. - debug_assert!(index < self.buffer.len()); - let slot = unsafe { self.buffer.get_unchecked(index) }; - let stamp = slot.stamp.load(Ordering::Acquire); - - // If the stamp is ahead of the head by 1, we may attempt to pop. - if head + 1 == stamp { - let new = if index + 1 < self.capacity() { - // Same lap, incremented index. - // Set to `{ lap: lap, index: index + 1 }`. - head + 1 - } else { - // One lap forward, index wraps around to zero. - // Set to `{ lap: lap.wrapping_add(1), index: 0 }`. - lap.wrapping_add(self.one_lap) - }; - - // Try moving the head. - match self.head.compare_exchange_weak( - head, - new, - Ordering::SeqCst, - Ordering::Relaxed, - ) { - Ok(_) => { - // Read the value from the slot and update the stamp. - let msg = unsafe { slot.value.get().read().assume_init() }; - slot.stamp - .store(head.wrapping_add(self.one_lap), Ordering::Release); - return Some(msg); - } - Err(h) => { - head = h; - backoff.spin(); - } - } - } else if stamp == head { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.load(Ordering::Relaxed); - - // If the tail equals the head, that means the channel is empty. - if tail == head { - return None; - } - - backoff.spin(); - head = self.head.load(Ordering::Relaxed); - } else { - // Snooze because we need to wait for the stamp to get updated. - backoff.snooze(); - head = self.head.load(Ordering::Relaxed); - } - } - } - - /// Returns the capacity of the queue. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::::new(100); - /// - /// assert_eq!(q.capacity(), 100); - /// ``` - #[inline] - pub fn capacity(&self) -> usize { - self.buffer.len() - } - - /// Returns `true` if the queue is empty. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::new(100); - /// - /// assert!(q.is_empty()); - /// q.push(1).unwrap(); - /// assert!(!q.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - let head = self.head.load(Ordering::SeqCst); - let tail = self.tail.load(Ordering::SeqCst); - - // Is the tail lagging one lap behind head? - // Is the tail equal to the head? - // - // Note: If the head changes just before we load the tail, that means there was a moment - // when the channel was not empty, so it is safe to just return `false`. - tail == head - } - - /// Returns `true` if the queue is full. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::new(1); - /// - /// assert!(!q.is_full()); - /// q.push(1).unwrap(); - /// assert!(q.is_full()); - /// ``` - pub fn is_full(&self) -> bool { - let tail = self.tail.load(Ordering::SeqCst); - let head = self.head.load(Ordering::SeqCst); - - // Is the head lagging one lap behind tail? - // - // Note: If the tail changes just before we load the head, that means there was a moment - // when the queue was not full, so it is safe to just return `false`. - head.wrapping_add(self.one_lap) == tail - } - - /// Returns the number of elements in the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::ArrayQueue; - /// - /// let q = ArrayQueue::new(100); - /// assert_eq!(q.len(), 0); - /// - /// q.push(10).unwrap(); - /// assert_eq!(q.len(), 1); - /// - /// q.push(20).unwrap(); - /// assert_eq!(q.len(), 2); - /// ``` - pub fn len(&self) -> usize { - loop { - // Load the tail, then load the head. 
- let tail = self.tail.load(Ordering::SeqCst); - let head = self.head.load(Ordering::SeqCst); - - // If the tail didn't change, we've got consistent values to work with. - if self.tail.load(Ordering::SeqCst) == tail { - let hix = head & (self.one_lap - 1); - let tix = tail & (self.one_lap - 1); - - return if hix < tix { - tix - hix - } else if hix > tix { - self.capacity() - hix + tix - } else if tail == head { - 0 - } else { - self.capacity() - }; - } - } - } -} - -impl Drop for ArrayQueue { - fn drop(&mut self) { - if mem::needs_drop::() { - // Get the index of the head. - let head = *self.head.get_mut(); - let tail = *self.tail.get_mut(); - - let hix = head & (self.one_lap - 1); - let tix = tail & (self.one_lap - 1); - - let len = if hix < tix { - tix - hix - } else if hix > tix { - self.capacity() - hix + tix - } else if tail == head { - 0 - } else { - self.capacity() - }; - - // Loop over all slots that hold a message and drop them. - for i in 0..len { - // Compute the index of the next slot holding a message. - let index = if hix + i < self.capacity() { - hix + i - } else { - hix + i - self.capacity() - }; - - unsafe { - debug_assert!(index < self.buffer.len()); - let slot = self.buffer.get_unchecked_mut(index); - (*slot.value.get()).assume_init_drop(); - } - } - } - } -} - -impl fmt::Debug for ArrayQueue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("ArrayQueue { .. 
}") - } -} - -impl IntoIterator for ArrayQueue { - type Item = T; - - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { value: self } - } -} - -#[derive(Debug)] -pub struct IntoIter { - value: ArrayQueue, -} - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - let value = &mut self.value; - let head = *value.head.get_mut(); - if value.head.get_mut() != value.tail.get_mut() { - let index = head & (value.one_lap - 1); - let lap = head & !(value.one_lap - 1); - // SAFETY: We have mutable access to this, so we can read without - // worrying about concurrency. Furthermore, we know this is - // initialized because it is the value pointed at by `value.head` - // and this is a non-empty queue. - let val = unsafe { - debug_assert!(index < value.buffer.len()); - let slot = value.buffer.get_unchecked_mut(index); - slot.value.get().read().assume_init() - }; - let new = if index + 1 < value.capacity() { - // Same lap, incremented index. - // Set to `{ lap: lap, index: index + 1 }`. - head + 1 - } else { - // One lap forward, index wraps around to zero. - // Set to `{ lap: lap.wrapping_add(1), index: 0 }`. - lap.wrapping_add(value.one_lap) - }; - *value.head.get_mut() = new; - Some(val) - } else { - None - } - } -} diff --git a/crossbeam-queue/src/lib.rs b/crossbeam-queue/src/lib.rs deleted file mode 100644 index 09a9f95b8..000000000 --- a/crossbeam-queue/src/lib.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Concurrent queues. -//! -//! This crate provides concurrent queues that can be shared among threads: -//! -//! * [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction. -//! * [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand. 
- -#![no_std] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![warn(missing_docs, unsafe_op_in_unsafe_fn)] - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -extern crate alloc; -#[cfg(feature = "std")] -extern crate std; - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod array_queue; -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -mod seg_queue; - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -pub use crate::{array_queue::ArrayQueue, seg_queue::SegQueue}; diff --git a/crossbeam-queue/src/seg_queue.rs b/crossbeam-queue/src/seg_queue.rs deleted file mode 100644 index c9344fe42..000000000 --- a/crossbeam-queue/src/seg_queue.rs +++ /dev/null @@ -1,566 +0,0 @@ -use alloc::alloc::{alloc_zeroed, handle_alloc_error, Layout}; -use alloc::boxed::Box; -use core::cell::UnsafeCell; -use core::fmt; -use core::marker::PhantomData; -use core::mem::MaybeUninit; -use core::panic::{RefUnwindSafe, UnwindSafe}; -use core::ptr; -use core::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering}; - -use crossbeam_utils::{Backoff, CachePadded}; - -// Bits indicating the state of a slot: -// * If a value has been written into the slot, `WRITE` is set. -// * If a value has been read from the slot, `READ` is set. -// * If the block is being destroyed, `DESTROY` is set. -const WRITE: usize = 1; -const READ: usize = 2; -const DESTROY: usize = 4; - -// Each block covers one "lap" of indices. -const LAP: usize = 32; -// The maximum number of values a block can hold. -const BLOCK_CAP: usize = LAP - 1; -// How many lower bits are reserved for metadata. -const SHIFT: usize = 1; -// Indicates that the block is not the last one. -const HAS_NEXT: usize = 1; - -/// A slot in a block. -struct Slot { - /// The value. - value: UnsafeCell>, - - /// The state of the slot. 
- state: AtomicUsize, -} - -impl Slot { - /// Waits until a value is written into the slot. - fn wait_write(&self) { - let backoff = Backoff::new(); - while self.state.load(Ordering::Acquire) & WRITE == 0 { - backoff.snooze(); - } - } -} - -/// A block in a linked list. -/// -/// Each block in the list can hold up to `BLOCK_CAP` values. -struct Block { - /// The next block in the linked list. - next: AtomicPtr>, - - /// Slots for values. - slots: [Slot; BLOCK_CAP], -} - -impl Block { - const LAYOUT: Layout = { - let layout = Layout::new::(); - assert!( - layout.size() != 0, - "Block should never be zero-sized, as it has an AtomicPtr field" - ); - layout - }; - - /// Creates an empty block. - fn new() -> Box { - // SAFETY: layout is not zero-sized - let ptr = unsafe { alloc_zeroed(Self::LAYOUT) }; - // Handle allocation failure - if ptr.is_null() { - handle_alloc_error(Self::LAYOUT) - } - // SAFETY: This is safe because: - // [1] `Block::next` (AtomicPtr) may be safely zero initialized. - // [2] `Block::slots` (Array) may be safely zero initialized because of [3, 4]. - // [3] `Slot::value` (UnsafeCell) may be safely zero initialized because it - // holds a MaybeUninit. - // [4] `Slot::state` (AtomicUsize) may be safely zero initialized. - // TODO: unsafe { Box::new_zeroed().assume_init() } - unsafe { Box::from_raw(ptr.cast()) } - } - - /// Waits until the next pointer is set. - fn wait_next(&self) -> *mut Self { - let backoff = Backoff::new(); - loop { - let next = self.next.load(Ordering::Acquire); - if !next.is_null() { - return next; - } - backoff.snooze(); - } - } - - /// Sets the `DESTROY` bit in slots starting from `start` and destroys the block. - unsafe fn destroy(this: *mut Self, start: usize) { - // It is not necessary to set the `DESTROY` bit in the last slot because that slot has - // begun destruction of the block. 
- for i in start..BLOCK_CAP - 1 { - let slot = unsafe { (*this).slots.get_unchecked(i) }; - - // Mark the `DESTROY` bit if a thread is still using the slot. - if slot.state.load(Ordering::Acquire) & READ == 0 - && slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0 - { - // If a thread is still using the slot, it will continue destruction of the block. - return; - } - } - - // No thread is using the block, now it is safe to destroy it. - drop(unsafe { Box::from_raw(this) }); - } -} - -/// A position in a queue. -struct Position { - /// The index in the queue. - index: AtomicUsize, - - /// The block in the linked list. - block: AtomicPtr>, -} - -/// An unbounded multi-producer multi-consumer queue. -/// -/// This queue is implemented as a linked list of segments, where each segment is a small buffer -/// that can hold a handful of elements. There is no limit to how many elements can be in the queue -/// at a time. However, since segments need to be dynamically allocated as elements get pushed, -/// this queue is somewhat slower than [`ArrayQueue`]. -/// -/// [`ArrayQueue`]: super::ArrayQueue -/// -/// # Examples -/// -/// ``` -/// use crossbeam_queue::SegQueue; -/// -/// let q = SegQueue::new(); -/// -/// q.push('a'); -/// q.push('b'); -/// -/// assert_eq!(q.pop(), Some('a')); -/// assert_eq!(q.pop(), Some('b')); -/// assert!(q.pop().is_none()); -/// ``` -pub struct SegQueue { - /// The head of the queue. - head: CachePadded>, - - /// The tail of the queue. - tail: CachePadded>, - - /// Indicates that dropping a `SegQueue` may drop values of type `T`. - _marker: PhantomData, -} - -unsafe impl Send for SegQueue {} -unsafe impl Sync for SegQueue {} - -impl UnwindSafe for SegQueue {} -impl RefUnwindSafe for SegQueue {} - -impl SegQueue { - /// Creates a new unbounded queue. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::SegQueue; - /// - /// let q = SegQueue::::new(); - /// ``` - pub const fn new() -> Self { - Self { - head: CachePadded::new(Position { - block: AtomicPtr::new(ptr::null_mut()), - index: AtomicUsize::new(0), - }), - tail: CachePadded::new(Position { - block: AtomicPtr::new(ptr::null_mut()), - index: AtomicUsize::new(0), - }), - _marker: PhantomData, - } - } - - /// Pushes back an element to the tail. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::SegQueue; - /// - /// let q = SegQueue::new(); - /// - /// q.push(10); - /// q.push(20); - /// ``` - pub fn push(&self, value: T) { - let backoff = Backoff::new(); - let mut tail = self.tail.index.load(Ordering::Acquire); - let mut block = self.tail.block.load(Ordering::Acquire); - let mut next_block = None; - - loop { - // Calculate the offset of the index into the block. - let offset = (tail >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - tail = self.tail.index.load(Ordering::Acquire); - block = self.tail.block.load(Ordering::Acquire); - continue; - } - - // If we're going to have to install the next block, allocate it in advance in order to - // make the wait for other threads as short as possible. - if offset + 1 == BLOCK_CAP && next_block.is_none() { - next_block = Some(Block::::new()); - } - - // If this is the first push operation, we need to allocate the first block. 
- if block.is_null() { - let new = Box::into_raw(Block::::new()); - - if self - .tail - .block - .compare_exchange(block, new, Ordering::Release, Ordering::Relaxed) - .is_ok() - { - self.head.block.store(new, Ordering::Release); - block = new; - } else { - next_block = unsafe { Some(Box::from_raw(new)) }; - tail = self.tail.index.load(Ordering::Acquire); - block = self.tail.block.load(Ordering::Acquire); - continue; - } - } - - let new_tail = tail + (1 << SHIFT); - - // Try advancing the tail forward. - match self.tail.index.compare_exchange_weak( - tail, - new_tail, - Ordering::SeqCst, - Ordering::Acquire, - ) { - Ok(_) => unsafe { - // If we've reached the end of the block, install the next one. - if offset + 1 == BLOCK_CAP { - let next_block = Box::into_raw(next_block.unwrap()); - let next_index = new_tail.wrapping_add(1 << SHIFT); - - self.tail.block.store(next_block, Ordering::Release); - self.tail.index.store(next_index, Ordering::Release); - (*block).next.store(next_block, Ordering::Release); - } - - // Write the value into the slot. - let slot = (*block).slots.get_unchecked(offset); - slot.value.get().write(MaybeUninit::new(value)); - slot.state.fetch_or(WRITE, Ordering::Release); - - return; - }, - Err(t) => { - tail = t; - block = self.tail.block.load(Ordering::Acquire); - backoff.spin(); - } - } - } - } - - /// Pops the head element from the queue. - /// - /// If the queue is empty, `None` is returned. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::SegQueue; - /// - /// let q = SegQueue::new(); - /// - /// q.push(10); - /// q.push(20); - /// assert_eq!(q.pop(), Some(10)); - /// assert_eq!(q.pop(), Some(20)); - /// assert!(q.pop().is_none()); - /// ``` - pub fn pop(&self) -> Option { - let backoff = Backoff::new(); - let mut head = self.head.index.load(Ordering::Acquire); - let mut block = self.head.block.load(Ordering::Acquire); - - loop { - // Calculate the offset of the index into the block. 
- let offset = (head >> SHIFT) % LAP; - - // If we reached the end of the block, wait until the next one is installed. - if offset == BLOCK_CAP { - backoff.snooze(); - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - continue; - } - - let mut new_head = head + (1 << SHIFT); - - if new_head & HAS_NEXT == 0 { - atomic::fence(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::Relaxed); - - // If the tail equals the head, that means the queue is empty. - if head >> SHIFT == tail >> SHIFT { - return None; - } - - // If head and tail are not in the same block, set `HAS_NEXT` in head. - if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP { - new_head |= HAS_NEXT; - } - } - - // The block can be null here only if the first push operation is in progress. In that - // case, just wait until it gets initialized. - if block.is_null() { - backoff.snooze(); - head = self.head.index.load(Ordering::Acquire); - block = self.head.block.load(Ordering::Acquire); - continue; - } - - // Try moving the head index forward. - match self.head.index.compare_exchange_weak( - head, - new_head, - Ordering::SeqCst, - Ordering::Acquire, - ) { - Ok(_) => unsafe { - // If we've reached the end of the block, move to the next one. - if offset + 1 == BLOCK_CAP { - let next = (*block).wait_next(); - let mut next_index = (new_head & !HAS_NEXT).wrapping_add(1 << SHIFT); - if !(*next).next.load(Ordering::Relaxed).is_null() { - next_index |= HAS_NEXT; - } - - self.head.block.store(next, Ordering::Release); - self.head.index.store(next_index, Ordering::Release); - } - - // Read the value. - let slot = (*block).slots.get_unchecked(offset); - slot.wait_write(); - let value = slot.value.get().read().assume_init(); - - // Destroy the block if we've reached the end, or if another thread wanted to - // destroy but couldn't because we were busy reading from the slot. 
- if offset + 1 == BLOCK_CAP { - Block::destroy(block, 0); - } else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 { - Block::destroy(block, offset + 1); - } - - return Some(value); - }, - Err(h) => { - head = h; - block = self.head.block.load(Ordering::Acquire); - backoff.spin(); - } - } - } - } - - /// Returns `true` if the queue is empty. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::SegQueue; - /// - /// let q = SegQueue::new(); - /// - /// assert!(q.is_empty()); - /// q.push(1); - /// assert!(!q.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - let head = self.head.index.load(Ordering::SeqCst); - let tail = self.tail.index.load(Ordering::SeqCst); - head >> SHIFT == tail >> SHIFT - } - - /// Returns the number of elements in the queue. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_queue::SegQueue; - /// - /// let q = SegQueue::new(); - /// assert_eq!(q.len(), 0); - /// - /// q.push(10); - /// assert_eq!(q.len(), 1); - /// - /// q.push(20); - /// assert_eq!(q.len(), 2); - /// ``` - pub fn len(&self) -> usize { - loop { - // Load the tail index, then load the head index. - let mut tail = self.tail.index.load(Ordering::SeqCst); - let mut head = self.head.index.load(Ordering::SeqCst); - - // If the tail index didn't change, we've got consistent indices to work with. - if self.tail.index.load(Ordering::SeqCst) == tail { - // Erase the lower bits. - tail &= !((1 << SHIFT) - 1); - head &= !((1 << SHIFT) - 1); - - // Fix up indices if they fall onto block ends. - if (tail >> SHIFT) & (LAP - 1) == LAP - 1 { - tail = tail.wrapping_add(1 << SHIFT); - } - if (head >> SHIFT) & (LAP - 1) == LAP - 1 { - head = head.wrapping_add(1 << SHIFT); - } - - // Rotate indices so that head falls into the first block. - let lap = (head >> SHIFT) / LAP; - tail = tail.wrapping_sub((lap * LAP) << SHIFT); - head = head.wrapping_sub((lap * LAP) << SHIFT); - - // Remove the lower bits. 
- tail >>= SHIFT; - head >>= SHIFT; - - // Return the difference minus the number of blocks between tail and head. - return tail - head - tail / LAP; - } - } - } -} - -impl Drop for SegQueue { - fn drop(&mut self) { - let mut head = *self.head.index.get_mut(); - let mut tail = *self.tail.index.get_mut(); - let mut block = *self.head.block.get_mut(); - - // Erase the lower bits. - head &= !((1 << SHIFT) - 1); - tail &= !((1 << SHIFT) - 1); - - unsafe { - // Drop all values between `head` and `tail` and deallocate the heap-allocated blocks. - while head != tail { - let offset = (head >> SHIFT) % LAP; - - if offset < BLOCK_CAP { - // Drop the value in the slot. - let slot = (*block).slots.get_unchecked(offset); - (*slot.value.get()).assume_init_drop(); - } else { - // Deallocate the block and move to the next one. - let next = *(*block).next.get_mut(); - drop(Box::from_raw(block)); - block = next; - } - - head = head.wrapping_add(1 << SHIFT); - } - - // Deallocate the last remaining block. - if !block.is_null() { - drop(Box::from_raw(block)); - } - } - } -} - -impl fmt::Debug for SegQueue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("SegQueue { .. }") - } -} - -impl Default for SegQueue { - fn default() -> Self { - Self::new() - } -} - -impl IntoIterator for SegQueue { - type Item = T; - - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { value: self } - } -} - -#[derive(Debug)] -pub struct IntoIter { - value: SegQueue, -} - -impl Iterator for IntoIter { - type Item = T; - - fn next(&mut self) -> Option { - let value = &mut self.value; - let head = *value.head.index.get_mut(); - let tail = *value.tail.index.get_mut(); - if head >> SHIFT == tail >> SHIFT { - None - } else { - let block = *value.head.block.get_mut(); - let offset = (head >> SHIFT) % LAP; - - // SAFETY: We have mutable access to this, so we can read without - // worrying about concurrency. 
Furthermore, we know this is - // initialized because it is the value pointed at by `value.head` - // and this is a non-empty queue. - let item = unsafe { - let slot = (*block).slots.get_unchecked(offset); - slot.value.get().read().assume_init() - }; - if offset + 1 == BLOCK_CAP { - // Deallocate the block and move to the next one. - // SAFETY: The block is initialized because we've been reading - // from it this entire time. We can drop it b/c everything has - // been read out of it, so nothing is pointing to it anymore. - unsafe { - let next = *(*block).next.get_mut(); - drop(Box::from_raw(block)); - *value.head.block.get_mut() = next; - } - // The last value in a block is empty, so skip it - *value.head.index.get_mut() = head.wrapping_add(2 << SHIFT); - // Double-check that we're pointing to the first item in a block. - debug_assert_eq!((*value.head.index.get_mut() >> SHIFT) % LAP, 0); - } else { - *value.head.index.get_mut() = head.wrapping_add(1 << SHIFT); - } - Some(item) - } - } -} diff --git a/crossbeam-queue/tests/array_queue.rs b/crossbeam-queue/tests/array_queue.rs deleted file mode 100644 index b9d4e5fd4..000000000 --- a/crossbeam-queue/tests/array_queue.rs +++ /dev/null @@ -1,374 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; - -use crossbeam_queue::ArrayQueue; -use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -#[test] -fn smoke() { - let q = ArrayQueue::new(1); - - q.push(7).unwrap(); - assert_eq!(q.pop(), Some(7)); - - q.push(8).unwrap(); - assert_eq!(q.pop(), Some(8)); - assert!(q.pop().is_none()); -} - -#[test] -fn capacity() { - for i in 1..10 { - let q = ArrayQueue::::new(i); - assert_eq!(q.capacity(), i); - } -} - -#[test] -#[should_panic(expected = "capacity must be non-zero")] -fn zero_capacity() { - let _ = ArrayQueue::::new(0); -} - -#[test] -fn len_empty_full() { - let q = ArrayQueue::new(2); - - assert_eq!(q.len(), 0); - assert!(q.is_empty()); - assert!(!q.is_full()); - - q.push(()).unwrap(); - - 
assert_eq!(q.len(), 1); - assert!(!q.is_empty()); - assert!(!q.is_full()); - - q.push(()).unwrap(); - - assert_eq!(q.len(), 2); - assert!(!q.is_empty()); - assert!(q.is_full()); - - q.pop().unwrap(); - - assert_eq!(q.len(), 1); - assert!(!q.is_empty()); - assert!(!q.is_full()); -} - -#[test] -fn len() { - #[cfg(miri)] - const COUNT: usize = 30; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - #[cfg(miri)] - const CAP: usize = 40; - #[cfg(not(miri))] - const CAP: usize = 1000; - const ITERS: usize = CAP / 20; - - let q = ArrayQueue::new(CAP); - assert_eq!(q.len(), 0); - - for _ in 0..CAP / 10 { - for i in 0..ITERS { - q.push(i).unwrap(); - assert_eq!(q.len(), i + 1); - } - - for i in 0..ITERS { - q.pop().unwrap(); - assert_eq!(q.len(), ITERS - i - 1); - } - } - assert_eq!(q.len(), 0); - - for i in 0..CAP { - q.push(i).unwrap(); - assert_eq!(q.len(), i + 1); - } - - for _ in 0..CAP { - q.pop().unwrap(); - } - assert_eq!(q.len(), 0); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - loop { - if let Some(x) = q.pop() { - assert_eq!(x, i); - break; - } - } - let len = q.len(); - assert!(len <= CAP); - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - while q.push(i).is_err() {} - let len = q.len(); - assert!(len <= CAP); - } - }); - }) - .unwrap(); - assert_eq!(q.len(), 0); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let q = ArrayQueue::new(3); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - loop { - if let Some(x) = q.pop() { - assert_eq!(x, i); - break; - } - } - } - assert!(q.pop().is_none()); - }); - - scope.spawn(|_| { - for i in 0..COUNT { - while q.push(i).is_err() {} - } - }); - }) - .unwrap(); -} - -#[test] -fn spsc_ring_buffer() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let t = AtomicUsize::new(1); - let q = ArrayQueue::::new(3); - let v = (0..COUNT).map(|_| 
AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - scope.spawn(|_| loop { - match t.load(Ordering::SeqCst) { - 0 if q.is_empty() => break, - - _ => { - while let Some(n) = q.pop() { - v[n].fetch_add(1, Ordering::SeqCst); - } - } - } - }); - - scope.spawn(|_| { - for i in 0..COUNT { - if let Some(n) = q.force_push(i) { - v[n].fetch_add(1, Ordering::SeqCst); - } - } - - t.fetch_sub(1, Ordering::SeqCst); - }); - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), 1); - } -} - -#[test] -fn mpmc() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let q = ArrayQueue::::new(3); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = loop { - if let Some(x) = q.pop() { - break x; - } - }; - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - while q.push(i).is_err() {} - } - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn mpmc_ring_buffer() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let t = AtomicUsize::new(THREADS); - let q = ArrayQueue::::new(3); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| loop { - match t.load(Ordering::SeqCst) { - 0 if q.is_empty() => break, - - _ => { - while let Some(n) = q.pop() { - v[n].fetch_add(1, Ordering::SeqCst); - } - } - } - }); - } - - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - if let Some(n) = q.force_push(i) { - v[n].fetch_add(1, Ordering::SeqCst); - } - } - - t.fetch_sub(1, Ordering::SeqCst); - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn drops() { - 
let runs: usize = if cfg!(miri) { 3 } else { 100 }; - let steps: usize = if cfg!(miri) { 50 } else { 10_000 }; - let additional: usize = if cfg!(miri) { 10 } else { 50 }; - - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - let mut rng = thread_rng(); - - for _ in 0..runs { - let steps = rng.gen_range(0..steps); - let additional = rng.gen_range(0..additional); - - DROPS.store(0, Ordering::SeqCst); - let q = ArrayQueue::new(50); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - while q.pop().is_none() {} - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - while q.push(DropCounter).is_err() { - DROPS.fetch_sub(1, Ordering::SeqCst); - } - } - }); - }) - .unwrap(); - - for _ in 0..additional { - q.push(DropCounter).unwrap(); - } - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(q); - assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); - } -} - -#[test] -fn linearizable() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let q = ArrayQueue::new(THREADS); - - scope(|scope| { - for _ in 0..THREADS / 2 { - scope.spawn(|_| { - for _ in 0..COUNT { - while q.push(0).is_err() {} - q.pop().unwrap(); - } - }); - - scope.spawn(|_| { - for _ in 0..COUNT { - if q.force_push(0).is_none() { - q.pop().unwrap(); - } - } - }); - } - }) - .unwrap(); -} - -#[test] -fn into_iter() { - let q = ArrayQueue::new(100); - for i in 0..100 { - q.push(i).unwrap(); - } - for (i, j) in q.into_iter().enumerate() { - assert_eq!(i, j); - } -} diff --git a/crossbeam-queue/tests/seg_queue.rs b/crossbeam-queue/tests/seg_queue.rs deleted file mode 100644 index d2ad1e472..000000000 --- a/crossbeam-queue/tests/seg_queue.rs +++ /dev/null @@ -1,210 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; - -use crossbeam_queue::SegQueue; 
-use crossbeam_utils::thread::scope; -use rand::{thread_rng, Rng}; - -#[test] -fn smoke() { - let q = SegQueue::new(); - q.push(7); - assert_eq!(q.pop(), Some(7)); - - q.push(8); - assert_eq!(q.pop(), Some(8)); - assert!(q.pop().is_none()); -} - -#[test] -fn len_empty_full() { - let q = SegQueue::new(); - - assert_eq!(q.len(), 0); - assert!(q.is_empty()); - - q.push(()); - - assert_eq!(q.len(), 1); - assert!(!q.is_empty()); - - q.pop().unwrap(); - - assert_eq!(q.len(), 0); - assert!(q.is_empty()); -} - -#[test] -fn len() { - let q = SegQueue::new(); - - assert_eq!(q.len(), 0); - - for i in 0..50 { - q.push(i); - assert_eq!(q.len(), i + 1); - } - - for i in 0..50 { - q.pop().unwrap(); - assert_eq!(q.len(), 50 - i - 1); - } - - assert_eq!(q.len(), 0); -} - -#[test] -fn spsc() { - #[cfg(miri)] - const COUNT: usize = 100; - #[cfg(not(miri))] - const COUNT: usize = 100_000; - - let q = SegQueue::new(); - - scope(|scope| { - scope.spawn(|_| { - for i in 0..COUNT { - loop { - if let Some(x) = q.pop() { - assert_eq!(x, i); - break; - } - } - } - assert!(q.pop().is_none()); - }); - scope.spawn(|_| { - for i in 0..COUNT { - q.push(i); - } - }); - }) - .unwrap(); -} - -#[test] -fn mpmc() { - #[cfg(miri)] - const COUNT: usize = 50; - #[cfg(not(miri))] - const COUNT: usize = 25_000; - const THREADS: usize = 4; - - let q = SegQueue::::new(); - let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::>(); - - scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - for _ in 0..COUNT { - let n = loop { - if let Some(x) = q.pop() { - break x; - } - }; - v[n].fetch_add(1, Ordering::SeqCst); - } - }); - } - for _ in 0..THREADS { - scope.spawn(|_| { - for i in 0..COUNT { - q.push(i); - } - }); - } - }) - .unwrap(); - - for c in v { - assert_eq!(c.load(Ordering::SeqCst), THREADS); - } -} - -#[test] -fn drops() { - let runs: usize = if cfg!(miri) { 5 } else { 100 }; - let steps: usize = if cfg!(miri) { 50 } else { 10_000 }; - let additional: usize = if cfg!(miri) { 100 } else { 
1_000 }; - - static DROPS: AtomicUsize = AtomicUsize::new(0); - - #[derive(Debug, PartialEq)] - struct DropCounter; - - impl Drop for DropCounter { - fn drop(&mut self) { - DROPS.fetch_add(1, Ordering::SeqCst); - } - } - - let mut rng = thread_rng(); - - for _ in 0..runs { - let steps = rng.gen_range(0..steps); - let additional = rng.gen_range(0..additional); - - DROPS.store(0, Ordering::SeqCst); - let q = SegQueue::new(); - - scope(|scope| { - scope.spawn(|_| { - for _ in 0..steps { - while q.pop().is_none() {} - } - }); - - scope.spawn(|_| { - for _ in 0..steps { - q.push(DropCounter); - } - }); - }) - .unwrap(); - - for _ in 0..additional { - q.push(DropCounter); - } - - assert_eq!(DROPS.load(Ordering::SeqCst), steps); - drop(q); - assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional); - } -} - -#[test] -fn into_iter() { - let q = SegQueue::new(); - for i in 0..100 { - q.push(i); - } - for (i, j) in q.into_iter().enumerate() { - assert_eq!(i, j); - } -} - -#[test] -fn into_iter_drop() { - let q = SegQueue::new(); - for i in 0..100 { - q.push(i); - } - for (i, j) in q.into_iter().enumerate().take(50) { - assert_eq!(i, j); - } -} - -// If `Block` is created on the stack, the array of slots will multiply this `BigStruct` and -// probably overflow the thread stack. It's now directly created on the heap to avoid this. -#[test] -fn stack_overflow() { - const N: usize = 32_768; - struct BigStruct { - _data: [u8; N], - } - - let q = SegQueue::new(); - q.push(BigStruct { _data: [0u8; N] }); - - for _data in q.into_iter() {} -} diff --git a/crossbeam-skiplist/CHANGELOG.md b/crossbeam-skiplist/CHANGELOG.md deleted file mode 100644 index 6fe172889..000000000 --- a/crossbeam-skiplist/CHANGELOG.md +++ /dev/null @@ -1,21 +0,0 @@ -# Version 0.1.3 - -- Remove dependency on `cfg-if`. (#1072) - -# Version 0.1.2 - -- Bump the minimum supported Rust version to 1.61. (#1037) -- Add `compare_insert`. (#976) -- Improve support for targets without atomic CAS. 
(#1037) -- Remove build script. (#1037) -- Remove dependency on `scopeguard`. (#1045) - -# Version 0.1.1 - -- Fix `get_unchecked` panic by raw pointer calculation. (#940) - -# Version 0.1.0 - -**Note:** This release has been yanked due to bug fixed in 0.1.1. - -- Initial implementation. diff --git a/crossbeam-skiplist/Cargo.toml b/crossbeam-skiplist/Cargo.toml deleted file mode 100644 index 33cf3cd4f..000000000 --- a/crossbeam-skiplist/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "crossbeam-skiplist" -# When publishing a new version: -# - Update CHANGELOG.md -# - Update README.md (when increasing major or minor version) -# - Run './tools/publish.sh crossbeam-skiplist ' -version = "0.1.3" -edition = "2021" -rust-version = "1.61" -license = "MIT OR Apache-2.0" -repository = "https://github.com/crossbeam-rs/crossbeam" -homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-skiplist" -description = "A concurrent skip list" -keywords = ["map", "set", "skiplist", "lock-free"] -categories = ["algorithms", "concurrency", "data-structures", "no-std"] - -[features] -default = ["std"] - -# Enable to use APIs that require `std`. -# This is enabled by default. -std = ["alloc", "crossbeam-epoch/std", "crossbeam-utils/std"] - -# Enable to use APIs that require `alloc`. -# This is enabled by default and also enabled if the `std` feature is enabled. -# -# NOTE: Disabling both `std` *and* `alloc` features is not supported yet. 
-alloc = ["crossbeam-epoch/alloc"] - -[dependencies] -crossbeam-epoch = { version = "0.9.17", path = "../crossbeam-epoch", default-features = false } -crossbeam-utils = { version = "0.8.18", path = "../crossbeam-utils", default-features = false } -equivalent-flipped = "1" - -[dev-dependencies] -rand = "0.8" - -[lints] -workspace = true diff --git a/crossbeam-skiplist/LICENSE-APACHE b/crossbeam-skiplist/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/crossbeam-skiplist/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/crossbeam-skiplist/LICENSE-MIT b/crossbeam-skiplist/LICENSE-MIT deleted file mode 100644 index 068d491fd..000000000 --- a/crossbeam-skiplist/LICENSE-MIT +++ /dev/null @@ -1,27 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 The Crossbeam Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/crossbeam-skiplist/README.md b/crossbeam-skiplist/README.md deleted file mode 100644 index 8fa9dd4ee..000000000 --- a/crossbeam-skiplist/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Crossbeam Skiplist - -[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( -https://github.com/crossbeam-rs/crossbeam/actions) -[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-skiplist#license) -[![Cargo](https://img.shields.io/crates/v/crossbeam-skiplist.svg)]( -https://crates.io/crates/crossbeam-skiplist) -[![Documentation](https://docs.rs/crossbeam-skiplist/badge.svg)]( -https://docs.rs/crossbeam-skiplist) -[![Rust 1.61+](https://img.shields.io/badge/rust-1.61+-lightgray.svg)]( -https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) - -This crate provides the types [`SkipMap`] and [`SkipSet`]. -These data structures provide an interface similar to `BTreeMap` and `BTreeSet`, -respectively, except they support safe concurrent access across multiple threads. - -This crate can be used in `no_std` environments that implement `alloc`. The `alloc` feature of this crate needs to be enabled in `no_std` environments. - -[`SkipMap`]: https://docs.rs/crossbeam-skiplist/latest/crossbeam_skiplist/struct.SkipMap.html -[`SkipSet`]: https://docs.rs/crossbeam-skiplist/latest/crossbeam_skiplist/struct.SkipSet.html - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -crossbeam-skiplist = "0.1" -``` - -## Compatibility - -Crossbeam Skiplist supports stable Rust releases going back at least six months, -and every time the minimum supported Rust version is increased, a new minor -version is released. Currently, the minimum supported Rust version is 1.61. 
- -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -#### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. diff --git a/crossbeam-skiplist/src/equivalent.rs b/crossbeam-skiplist/src/equivalent.rs deleted file mode 100644 index 5ee5e8abd..000000000 --- a/crossbeam-skiplist/src/equivalent.rs +++ /dev/null @@ -1,6 +0,0 @@ -// These traits are based on `equivalent` crate, but `K` and `Q` are flipped to avoid type inference issues: -// https://github.com/indexmap-rs/equivalent/issues/5 - -//! Traits for key comparison in maps. - -pub use equivalent_flipped::*; diff --git a/crossbeam-skiplist/src/lib.rs b/crossbeam-skiplist/src/lib.rs deleted file mode 100644 index f045f6ddb..000000000 --- a/crossbeam-skiplist/src/lib.rs +++ /dev/null @@ -1,262 +0,0 @@ -//! Concurrent maps and sets based on [skip lists]. -//! -//! This crate provides the types [`SkipMap`] and [`SkipSet`]. -//! These data structures provide an interface similar to [`BTreeMap`] and [`BTreeSet`], -//! respectively, except they support safe concurrent access across -//! multiple threads. -//! -//! # Concurrent access -//! [`SkipMap`] and [`SkipSet`] implement [`Send`] and [`Sync`], -//! so they can be shared across threads with ease. -//! -//! Methods which mutate the map, such as [`insert`], -//! take `&self` rather than `&mut self`. This allows -//! them to be invoked concurrently. -//! -//! ``` -//! use crossbeam_skiplist::SkipMap; -//! use crossbeam_utils::thread::scope; -//! -//! let person_ages = SkipMap::new(); -//! -//! scope(|s| { -//! // Insert entries into the map from multiple threads. -//! 
s.spawn(|_| { -//! person_ages.insert("Spike Garrett", 22); -//! person_ages.insert("Stan Hancock", 47); -//! person_ages.insert("Rea Bryan", 234); -//! -//! assert_eq!(person_ages.get("Spike Garrett").unwrap().value(), &22); -//! }); -//! s.spawn(|_| { -//! person_ages.insert("Bryon Conroy", 65); -//! person_ages.insert("Lauren Reilly", 2); -//! }); -//! }).unwrap(); -//! -//! assert!(person_ages.contains_key("Spike Garrett")); -//! person_ages.remove("Rea Bryan"); -//! assert!(!person_ages.contains_key("Rea Bryan")); -//! -//! ``` -//! -//! Concurrent access to skip lists is lock-free and sound. -//! Threads won't get blocked waiting for other threads to finish operating -//! on the map. -//! -//! Be warned that, because of this lock-freedom, it's easy to introduce -//! race conditions into your code. For example: -//! ```no_run -//! use crossbeam_skiplist::SkipSet; -//! use crossbeam_utils::thread::scope; -//! -//! let numbers = SkipSet::new(); -//! scope(|s| { -//! // Spawn a thread which will remove 5 from the set. -//! s.spawn(|_| { -//! numbers.remove(&5); -//! }); -//! -//! // While the thread above is running, insert a value into the set. -//! numbers.insert(5); -//! -//! // This check can fail! -//! // The other thread may remove the value -//! // before we perform this check. -//! assert!(numbers.contains(&5)); -//! }).unwrap(); -//! ``` -//! -//! In effect, a _single_ operation on the map, such as [`insert`], -//! operates atomically: race conditions are impossible. However, -//! concurrent calls to functions can become interleaved across -//! threads, introducing non-determinism. -//! -//! To avoid this sort of race condition, never assume that a collection's -//! state will remain the same across multiple lines of code. For instance, -//! in the example above, the problem arises from the assumption that -//! the map won't be mutated between the calls to `insert` and `contains`. -//! In sequential code, this would be correct. But when multiple -//! 
threads are introduced, more care is needed. -//! -//! Note that race conditions do not violate Rust's memory safety rules. -//! A race between multiple threads can never cause memory errors or -//! segfaults. A race condition is a _logic error_ in its entirety. -//! -//! # Mutable access to elements -//! [`SkipMap`] and [`SkipSet`] provide no way to retrieve a mutable reference -//! to a value. Since access methods can be called concurrently, providing -//! e.g. a `get_mut` function could cause data races. -//! -//! A solution to the above is to have the implementation wrap -//! each value in a lock. However, this has some repercussions: -//! * The map would no longer be lock-free, inhibiting scalability -//! and allowing for deadlocks. -//! * If a user of the map doesn't need mutable access, then they pay -//! the price of locks without actually needing them. -//! -//! Instead, the approach taken by this crate gives more control to the user. -//! If mutable access is needed, then you can use interior mutability, -//! such as [`RwLock`]: `SkipMap>`. -//! -//! # Garbage collection -//! A problem faced by many concurrent data structures -//! is choosing when to free unused memory. Care must be -//! taken to prevent use-after-frees and double-frees, both -//! of which cause undefined behavior. -//! -//! Consider the following sequence of events operating on a [`SkipMap`]: -//! * Thread A calls [`get`] and holds a reference to a value in the map. -//! * Thread B removes that key from the map. -//! * Thread A now attempts to access the value. -//! -//! What happens here? If the map implementation frees the memory -//! belonging to a value when it is -//! removed, then a user-after-free occurs, resulting in memory corruption. -//! -//! To solve the above, this crate uses the _epoch-based memory reclamation_ mechanism -//! implemented in [`crossbeam-epoch`]. Simplified, a value removed from the map -//! is not freed until after all references to it have been dropped. 
This mechanism -//! is similar to the garbage collection found in some languages, such as Java, except -//! it operates solely on the values inside the map. -//! -//! This garbage collection scheme functions automatically; users don't have to worry about it. -//! However, keep in mind that holding [`Entry`] handles to entries in the map will prevent -//! that memory from being freed until at least after the handles are dropped. -//! -//! # Performance versus B-trees -//! In general, when you need concurrent writes -//! to an ordered collection, skip lists are a reasonable choice. -//! However, they can be substantially slower than B-trees -//! in some scenarios. -//! -//! The main benefit of a skip list over a `RwLock` -//! is that it allows concurrent writes to progress without -//! mutual exclusion. However, when the frequency -//! of writes is low, this benefit isn't as useful. -//! In these cases, a shared [`BTreeMap`] may be a faster option. -//! -//! These guidelines should be taken with a grain of salt—performance -//! in practice varies depending on your use case. -//! In the end, the best way to choose between [`BTreeMap`] and [`SkipMap`] -//! is to benchmark them in your own application. -//! -//! # Alternatives -//! This crate implements _ordered_ maps and sets, akin to [`BTreeMap`] and [`BTreeSet`]. -//! In many situations, however, a defined order on elements is not required. For these -//! purposes, unordered maps will suffice. In addition, unordered maps -//! often have better performance characteristics than their ordered alternatives. -//! -//! Crossbeam [does not currently provide a concurrent unordered map](https://github.com/crossbeam-rs/rfcs/issues/32). -//! That said, here are some other crates which may suit you: -//! * [`DashMap`](https://docs.rs/dashmap) implements a novel concurrent hash map -//! with good performance characteristics. -//! * [`flurry`](https://docs.rs/flurry) is a Rust port of Java's `ConcurrentHashMap`. -//! -//! 
[`insert`]: SkipMap::insert -//! [`get`]: SkipMap::get -//! [`Entry`]: map::Entry -//! [skip lists]: https://en.wikipedia.org/wiki/Skip_list -//! [`crossbeam-epoch`]: https://docs.rs/crossbeam-epoch -//! [`BTreeMap`]: std::collections::BTreeMap -//! [`BTreeSet`]: std::collections::BTreeSet -//! [`RwLock`]: std::sync::RwLock -//! -//! # Examples -//! [`SkipMap`] basic usage: -//! ``` -//! use crossbeam_skiplist::SkipMap; -//! -//! // Note that the variable doesn't have to be mutable: -//! // SkipMap methods take &self to support concurrent access. -//! let movie_reviews = SkipMap::new(); -//! -//! // Insert some key-value pairs. -//! movie_reviews.insert("Office Space", "Deals with real issues in the workplace."); -//! movie_reviews.insert("Pulp Fiction", "Masterpiece."); -//! movie_reviews.insert("The Godfather", "Very enjoyable."); -//! movie_reviews.insert("The Blues Brothers", "Eye lyked it a lot."); -//! -//! // Get the value associated with a key. -//! // get() returns an Entry, which gives -//! // references to the key and value. -//! let pulp_fiction = movie_reviews.get("Pulp Fiction").unwrap(); -//! assert_eq!(*pulp_fiction.key(), "Pulp Fiction"); -//! assert_eq!(*pulp_fiction.value(), "Masterpiece."); -//! -//! // Remove a key-value pair. -//! movie_reviews.remove("The Blues Brothers"); -//! assert!(movie_reviews.get("The Blues Brothers").is_none()); -//! -//! // Iterate over the reviews. Since SkipMap -//! // is an ordered map, the iterator will yield -//! // keys in lexicographical order. -//! for entry in &movie_reviews { -//! let movie = entry.key(); -//! let review = entry.value(); -//! println!("{}: \"{}\"", movie, review); -//! } -//! ``` -//! -//! [`SkipSet`] basic usage: -//! ``` -//! use crossbeam_skiplist::SkipSet; -//! -//! let books = SkipSet::new(); -//! -//! // Add some books to the set. -//! books.insert("A Dance With Dragons"); -//! books.insert("To Kill a Mockingbird"); -//! books.insert("The Odyssey"); -//! 
books.insert("The Great Gatsby"); -//! -//! // Check for a specific one. -//! if !books.contains("The Winds of Winter") { -//! println!("We have {} books, but The Winds of Winter ain't one.", -//! books.len()); -//! } -//! -//! // Remove a book from the set. -//! books.remove("To Kill a Mockingbird"); -//! assert!(!books.contains("To Kill a Mockingbird")); -//! -//! // Iterate over the books in the set. -//! // Values are returned in lexicographical order. -//! for entry in &books { -//! let book = entry.value(); -//! println!("{}", book); -//! } -//! ``` - -#![no_std] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![warn(missing_docs, unsafe_op_in_unsafe_fn)] - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -extern crate alloc; -#[cfg(feature = "std")] -extern crate std; - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -pub mod base; - -#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] -#[doc(inline)] -pub use crate::base::SkipList; - -#[cfg(feature = "std")] -pub mod map; -#[cfg(feature = "std")] -pub mod set; - -#[cfg(feature = "std")] -#[doc(inline)] -pub use crate::{map::SkipMap, set::SkipSet}; - -pub mod equivalent; diff --git a/crossbeam-utils/CHANGELOG.md b/crossbeam-utils/CHANGELOG.md deleted file mode 100644 index 5aa1967e7..000000000 --- a/crossbeam-utils/CHANGELOG.md +++ /dev/null @@ -1,243 +0,0 @@ -# Version 0.8.21 - -- Improve implementation of `CachePadded`. (#1152) - -# Version 0.8.20 - -- Implement `Display` for `CachePadded`. (#1097) - -# Version 0.8.19 - -- Remove dependency on `cfg-if`. (#1072) - -# Version 0.8.18 - -- Relax the minimum supported Rust version to 1.60. (#1056) -- Improve scalability of `AtomicCell` fallback. (#1055) - -# Version 0.8.17 - -- Bump the minimum supported Rust version to 1.61. (#1037) -- Improve support for targets without atomic CAS or 64-bit atomic. 
(#1037) -- Always implement `UnwindSafe` and `RefUnwindSafe` for `AtomicCell`. (#1045) -- Improve compatibility with Miri, TSan, and loom. (#995, #1003) -- Improve compatibility with unstable `oom=panic`. (#1045) -- Improve implementation of `CachePadded`. (#1014, #1025) -- Update `loom` dependency to 0.7. - -# Version 0.8.16 - -- Improve implementation of `CachePadded`. (#967) - -# Version 0.8.15 - -- Add `#[clippy::has_significant_drop]` to `ShardedLock{Read,Write}Guard`. (#958) -- Improve handling of very large timeout. (#953) -- Soft-deprecate `thread::scope()` in favor of the more efficient `std::thread::scope` that stabilized in Rust 1.63. (#954) - -# Version 0.8.14 - -- Fix build script bug introduced in 0.8.13. (#932) - -# Version 0.8.13 - -**Note:** This release has been yanked due to regression fixed in 0.8.14. - -- Improve support for custom targets. (#922) - -# Version 0.8.12 - -- Removes the dependency on the `once_cell` crate to restore the MSRV. (#913) -- Work around [rust-lang#98302](https://github.com/rust-lang/rust/issues/98302), which causes compile error on windows-gnu when LTO is enabled. (#913) - -# Version 0.8.11 - -- Bump the minimum supported Rust version to 1.38. (#877) - -# Version 0.8.10 - -- Fix unsoundness of `AtomicCell` on types containing niches. (#834) - This fix contains breaking changes, but they are allowed because this is a soundness bug fix. See #834 for more. - -# Version 0.8.9 - -- Replace lazy_static with once_cell. (#817) - -# Version 0.8.8 - -- Fix a bug when unstable `loom` support is enabled. (#787) - -# Version 0.8.7 - -- Add `AtomicCell<{i*,u*}>::{fetch_max,fetch_min}`. (#785) -- Add `AtomicCell<{i*,u*,bool}>::fetch_nand`. (#785) -- Fix unsoundness of `AtomicCell<{i,u}64>` arithmetics on 32-bit targets that support `Atomic{I,U}64` (#781) - -# Version 0.8.6 - -**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. 
- -- Re-add `AtomicCell<{i,u}64>::{fetch_add,fetch_sub,fetch_and,fetch_or,fetch_xor}` that were accidentally removed in 0.8.0 on targets that do not support `Atomic{I,U}64`. (#767) -- Re-add `AtomicCell<{i,u}128>::{fetch_add,fetch_sub,fetch_and,fetch_or,fetch_xor}` that were accidentally removed in 0.8.0. (#767) - -# Version 0.8.5 - -**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. - -- Add `AtomicCell::fetch_update`. (#704) -- Support targets that do not have atomic CAS on stable Rust. (#698) - -# Version 0.8.4 - -**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. - -- Bump `loom` dependency to version 0.5. (#686) - -# Version 0.8.3 - -**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. - -- Make `loom` dependency optional. (#666) - -# Version 0.8.2 - -**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. - -- Deprecate `AtomicCell::compare_and_swap`. Use `AtomicCell::compare_exchange` instead. (#619) -- Add `Parker::park_deadline`. (#563) -- Improve implementation of `CachePadded`. (#636) -- Add unstable support for `loom`. (#487) - -# Version 0.8.1 - -**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. - -- Make `AtomicCell::is_lock_free` always const fn. (#600) -- Fix a bug in `seq_lock_wide`. (#596) -- Remove `const_fn` dependency. (#600) -- `crossbeam-utils` no longer fails to compile if unable to determine rustc version. Instead, it now displays a warning. 
(#604) - -# Version 0.8.0 - -**Note:** This release has been yanked. See [GHSA-qc84-gqf4-9926](https://github.com/crossbeam-rs/crossbeam/security/advisories/GHSA-qc84-gqf4-9926) for details. - -- Bump the minimum supported Rust version to 1.36. -- Remove deprecated `AtomicCell::get_mut()` and `Backoff::is_complete()` methods. -- Remove `alloc` feature. -- Make `CachePadded::new()` const function. -- Make `AtomicCell::is_lock_free()` const function at 1.46+. -- Implement `From` for `AtomicCell`. - -# Version 0.7.2 - -- Fix bug in release (yanking 0.7.1) - -# Version 0.7.1 - -- Bump `autocfg` dependency to version 1.0. (#460) -- Make `AtomicCell` lockfree for u8, u16, u32, u64 sized values at 1.34+. (#454) - -# Version 0.7.0 - -- Bump the minimum required version to 1.28. -- Fix breakage with nightly feature due to rust-lang/rust#65214. -- Apply `#[repr(transparent)]` to `AtomicCell`. -- Make `AtomicCell::new()` const function at 1.31+. - -# Version 0.6.6 - -- Add `UnwindSafe` and `RefUnwindSafe` impls for `AtomicCell`. -- Add `AtomicCell::as_ptr()`. -- Add `AtomicCell::take()`. -- Fix a bug in `AtomicCell::compare_exchange()` and `AtomicCell::compare_and_swap()`. -- Various documentation improvements. - -# Version 0.6.5 - -- Rename `Backoff::is_complete()` to `Backoff::is_completed()`. - -# Version 0.6.4 - -- Add `WaitGroup`, `ShardedLock`, and `Backoff`. -- Add `fetch_*` methods for `AtomicCell` and `AtomicCell`. -- Expand documentation. - -# Version 0.6.3 - -- Add `AtomicCell`. -- Improve documentation. - -# Version 0.6.2 - -- Add `Parker`. -- Improve documentation. - -# Version 0.6.1 - -- Fix a soundness bug in `Scope::spawn()`. -- Remove the `T: 'scope` bound on `ScopedJoinHandle`. - -# Version 0.6.0 - -- Move `AtomicConsume` to `atomic` module. -- `scope()` returns a `Result` of thread joins. -- Remove `spawn_unchecked`. -- Fix a soundness bug due to incorrect lifetimes. -- Improve documentation. -- Support nested scoped spawns. 
-- Implement `Copy`, `Hash`, `PartialEq`, and `Eq` for `CachePadded`. -- Add `CachePadded::into_inner()`. - -# Version 0.5.0 - -- Reorganize sub-modules and rename functions. - -# Version 0.4.1 - -- Fix a documentation link. - -# Version 0.4.0 - -- `CachePadded` supports types bigger than 64 bytes. -- Fix a bug in scoped threads where unitialized memory was being dropped. -- Minimum required Rust version is now 1.25. - -# Version 0.3.2 - -- Mark `load_consume` with `#[inline]`. - -# Version 0.3.1 - -- `load_consume` on ARM and AArch64. - -# Version 0.3.0 - -- Add `join` for scoped thread API. -- Add `load_consume` for atomic load-consume memory ordering. -- Remove `AtomicOption`. - -# Version 0.2.2 - -- Support Rust 1.12.1. -- Call `T::clone` when cloning a `CachePadded`. - -# Version 0.2.1 - -- Add `use_std` feature. - -# Version 0.2.0 - -- Add `nightly` feature. -- Use `repr(align(64))` on `CachePadded` with the `nightly` feature. -- Implement `Drop` for `CachePadded`. -- Implement `Clone` for `CachePadded`. -- Implement `From` for `CachePadded`. -- Implement better `Debug` for `CachePadded`. -- Write more tests. -- Add this changelog. -- Change cache line length to 64 bytes. -- Remove `ZerosValid`. 
- -# Version 0.1.0 - -- Old implementation of `CachePadded` from `crossbeam` version 0.3.0 diff --git a/crossbeam-utils/Cargo.toml b/crossbeam-utils/Cargo.toml deleted file mode 100644 index 6e9b52f4c..000000000 --- a/crossbeam-utils/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "crossbeam-utils" -# When publishing a new version: -# - Update CHANGELOG.md -# - Update README.md (when increasing major or minor version) -# - Run './tools/publish.sh crossbeam-utils ' -version = "0.8.21" -edition = "2021" -rust-version = "1.56" -license = "MIT OR Apache-2.0" -repository = "https://github.com/crossbeam-rs/crossbeam" -homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" -description = "Utilities for concurrent programming" -keywords = ["scoped", "thread", "atomic", "cache"] -categories = ["algorithms", "concurrency", "data-structures", "no-std"] - -[package.metadata.docs.rs] -all-features = true - -[features] -default = ["std"] - -# Enable to use APIs that require `std`. -# This is enabled by default. -std = [] - -# Enable `atomic` module. -# This requires Rust 1.60. -atomic = ["atomic-maybe-uninit"] - -[dependencies] -atomic-maybe-uninit = { version = "0.3.4", optional = true } - -# Enable the use of loom for concurrency testing. -# -# NOTE: This feature is outside of the normal semver guarantees and minor or -# patch versions of crossbeam may make breaking changes to them at any time. -[target.'cfg(crossbeam_loom)'.dependencies] -loom = { version = "0.7.1", optional = true } - -[dev-dependencies] -rand = "0.8" - -[lints] -workspace = true diff --git a/crossbeam-utils/LICENSE-APACHE b/crossbeam-utils/LICENSE-APACHE deleted file mode 100644 index 16fe87b06..000000000 --- a/crossbeam-utils/LICENSE-APACHE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the 
following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/crossbeam-utils/LICENSE-MIT b/crossbeam-utils/LICENSE-MIT deleted file mode 100644 index 068d491fd..000000000 --- a/crossbeam-utils/LICENSE-MIT +++ /dev/null @@ -1,27 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2019 The Crossbeam Project Developers - -Permission is hereby granted, free of charge, to any -person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the -Software without restriction, including without -limitation the rights to use, copy, modify, merge, -publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software -is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice -shall be included in all copies or substantial portions -of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
diff --git a/crossbeam-utils/README.md b/crossbeam-utils/README.md deleted file mode 100644 index f836b39f6..000000000 --- a/crossbeam-utils/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# Crossbeam Utils - -[![Build Status](https://github.com/crossbeam-rs/crossbeam/workflows/CI/badge.svg)]( -https://github.com/crossbeam-rs/crossbeam/actions) -[![License](https://img.shields.io/badge/license-MIT_OR_Apache--2.0-blue.svg)]( -https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils#license) -[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)]( -https://crates.io/crates/crossbeam-utils) -[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)]( -https://docs.rs/crossbeam-utils) -[![Rust 1.60+](https://img.shields.io/badge/rust-1.60+-lightgray.svg)]( -https://www.rust-lang.org) -[![chat](https://img.shields.io/discord/569610676205781012.svg?logo=discord)](https://discord.com/invite/JXYwgWZ) - -This crate provides miscellaneous tools for concurrent programming: - -#### Atomics - -* [`AtomicCell`], a thread-safe mutable memory location.(no_std) -* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.(no_std) - -#### Thread synchronization - -* [`Parker`], a thread parking primitive. -* [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. -* [`WaitGroup`], for synchronizing the beginning or end of some computation. - -#### Utilities - -* [`Backoff`], for exponential backoff in spin loops.(no_std) -* [`CachePadded`], for padding and aligning a value to the length of a cache line.(no_std) -* [`scope`], for spawning threads that borrow local variables from the stack. - -*Features marked with (no_std) can be used in `no_std` environments.*
- -[`AtomicCell`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/atomic/struct.AtomicCell.html -[`AtomicConsume`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/atomic/trait.AtomicConsume.html -[`Parker`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/sync/struct.Parker.html -[`ShardedLock`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/sync/struct.ShardedLock.html -[`WaitGroup`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/sync/struct.WaitGroup.html -[`Backoff`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/struct.Backoff.html -[`CachePadded`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/struct.CachePadded.html -[`scope`]: https://docs.rs/crossbeam-utils/latest/crossbeam_utils/thread/fn.scope.html - -## Usage - -Add this to your `Cargo.toml`: - -```toml -[dependencies] -crossbeam-utils = "0.8" -``` - -## Compatibility - -Crossbeam Utils supports stable Rust releases going back at least six months, -and every time the minimum supported Rust version is increased, a new minor -version is released. Currently, the minimum supported Rust version is 1.56. - -## License - -Licensed under either of - - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) - -at your option. - -#### Contribution - -Unless you explicitly state otherwise, any contribution intentionally submitted -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be -dual licensed as above, without any additional terms or conditions. 
diff --git a/crossbeam-utils/benches/atomic_cell.rs b/crossbeam-utils/benches/atomic_cell.rs deleted file mode 100644 index 844f7c02b..000000000 --- a/crossbeam-utils/benches/atomic_cell.rs +++ /dev/null @@ -1,156 +0,0 @@ -#![feature(test)] - -extern crate test; - -use std::sync::Barrier; - -use crossbeam_utils::atomic::AtomicCell; -use crossbeam_utils::thread; - -#[bench] -fn load_u8(b: &mut test::Bencher) { - let a = AtomicCell::new(0u8); - let mut sum = 0; - b.iter(|| sum += a.load()); - test::black_box(sum); -} - -#[bench] -fn store_u8(b: &mut test::Bencher) { - let a = AtomicCell::new(0u8); - b.iter(|| a.store(1)); -} - -#[bench] -fn fetch_add_u8(b: &mut test::Bencher) { - let a = AtomicCell::new(0u8); - b.iter(|| a.fetch_add(1)); -} - -#[bench] -fn compare_exchange_u8(b: &mut test::Bencher) { - let a = AtomicCell::new(0u8); - let mut i = 0; - b.iter(|| { - let _ = a.compare_exchange(i, i.wrapping_add(1)); - i = i.wrapping_add(1); - }); -} - -#[bench] -fn concurrent_load_u8(b: &mut test::Bencher) { - const THREADS: usize = 2; - const STEPS: usize = 1_000_000; - - let start = Barrier::new(THREADS + 1); - let end = Barrier::new(THREADS + 1); - let exit = AtomicCell::new(false); - - let a = AtomicCell::new(0u8); - - thread::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| loop { - start.wait(); - - let mut sum = 0; - for _ in 0..STEPS { - sum += a.load(); - } - test::black_box(sum); - - end.wait(); - if exit.load() { - break; - } - }); - } - - start.wait(); - end.wait(); - - b.iter(|| { - start.wait(); - end.wait(); - }); - - start.wait(); - exit.store(true); - end.wait(); - }) - .unwrap(); -} - -#[bench] -fn load_usize(b: &mut test::Bencher) { - let a = AtomicCell::new(0usize); - let mut sum = 0; - b.iter(|| sum += a.load()); - test::black_box(sum); -} - -#[bench] -fn store_usize(b: &mut test::Bencher) { - let a = AtomicCell::new(0usize); - b.iter(|| a.store(1)); -} - -#[bench] -fn fetch_add_usize(b: &mut test::Bencher) { - let a = 
AtomicCell::new(0usize); - b.iter(|| a.fetch_add(1)); -} - -#[bench] -fn compare_exchange_usize(b: &mut test::Bencher) { - let a = AtomicCell::new(0usize); - let mut i = 0; - b.iter(|| { - let _ = a.compare_exchange(i, i.wrapping_add(1)); - i = i.wrapping_add(1); - }); -} - -#[bench] -fn concurrent_load_usize(b: &mut test::Bencher) { - const THREADS: usize = 2; - const STEPS: usize = 1_000_000; - - let start = Barrier::new(THREADS + 1); - let end = Barrier::new(THREADS + 1); - let exit = AtomicCell::new(false); - - let a = AtomicCell::new(0usize); - - thread::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| loop { - start.wait(); - - let mut sum = 0; - for _ in 0..STEPS { - sum += a.load(); - } - test::black_box(sum); - - end.wait(); - if exit.load() { - break; - } - }); - } - - start.wait(); - end.wait(); - - b.iter(|| { - start.wait(); - end.wait(); - }); - - start.wait(); - exit.store(true); - end.wait(); - }) - .unwrap(); -} diff --git a/crossbeam-utils/build-common.rs b/crossbeam-utils/build-common.rs deleted file mode 120000 index 929510c73..000000000 --- a/crossbeam-utils/build-common.rs +++ /dev/null @@ -1 +0,0 @@ -../build-common.rs \ No newline at end of file diff --git a/crossbeam-utils/build.rs b/crossbeam-utils/build.rs deleted file mode 100644 index dcf39db26..000000000 --- a/crossbeam-utils/build.rs +++ /dev/null @@ -1,48 +0,0 @@ -// The rustc-cfg listed below are considered public API, but it is *unstable* -// and outside of the normal semver guarantees: -// -// - `crossbeam_no_atomic` -// Assume the target does *not* support any atomic operations. -// This is usually detected automatically by the build script, but you may -// need to enable it manually when building for custom targets or using -// non-cargo build systems that don't run the build script. -// -// With the exceptions mentioned above, the rustc-cfg emitted by the build -// script are *not* public API. 
- -use std::env; - -include!("no_atomic.rs"); -include!("build-common.rs"); - -fn main() { - println!("cargo:rerun-if-changed=no_atomic.rs"); - println!("cargo:rustc-check-cfg=cfg(crossbeam_no_atomic,crossbeam_sanitize_thread,crossbeam_atomic_cell_force_fallback)"); - - let target = match env::var("TARGET") { - Ok(target) => convert_custom_linux_target(target), - Err(e) => { - println!( - "cargo:warning={}: unable to get TARGET environment variable: {}", - env!("CARGO_PKG_NAME"), - e - ); - return; - } - }; - - // Note that this is `no_`*, not `has_*`. This allows treating as the latest - // stable rustc is used when the build script doesn't run. This is useful - // for non-cargo build systems that don't run the build script. - if NO_ATOMIC.contains(&&*target) { - println!("cargo:rustc-cfg=crossbeam_no_atomic"); - } - - // `cfg(sanitize = "..")` is not stabilized. - if let Ok(sanitize) = env::var("CARGO_CFG_SANITIZE") { - if sanitize.contains("thread") { - println!("cargo:rustc-cfg=crossbeam_sanitize_thread"); - } - println!("cargo:rustc-cfg=crossbeam_atomic_cell_force_fallback"); - } -} diff --git a/crossbeam-utils/no_atomic.rs b/crossbeam-utils/no_atomic.rs deleted file mode 120000 index 417886bb7..000000000 --- a/crossbeam-utils/no_atomic.rs +++ /dev/null @@ -1 +0,0 @@ -../no_atomic.rs \ No newline at end of file diff --git a/crossbeam-utils/src/atomic/atomic_cell.rs b/crossbeam-utils/src/atomic/atomic_cell.rs deleted file mode 100644 index 98fb8d495..000000000 --- a/crossbeam-utils/src/atomic/atomic_cell.rs +++ /dev/null @@ -1,1243 +0,0 @@ -// Necessary for implementing atomic methods for `AtomicUnit` -#![allow(clippy::unit_arg)] - -use crate::primitive::sync::atomic::{self, Ordering}; -use crate::CachePadded; -use core::cell::UnsafeCell; -use core::cmp; -use core::fmt; -use core::mem::{self, ManuallyDrop, MaybeUninit}; -use core::panic::{RefUnwindSafe, UnwindSafe}; -use core::ptr; - -use super::seq_lock::SeqLock; - -/// A thread-safe mutable memory location. 
-/// -/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads. -/// -/// Operations on `AtomicCell`s use atomic instructions whenever possible, and synchronize using -/// global locks otherwise. You can call [`AtomicCell::::is_lock_free()`] to check whether -/// atomic instructions or locks will be used. -/// -/// Atomic loads use the [`Acquire`] ordering and atomic stores use the [`Release`] ordering. -/// -/// [`Cell`]: std::cell::Cell -/// [`AtomicCell::::is_lock_free()`]: AtomicCell::is_lock_free -/// [`Acquire`]: std::sync::atomic::Ordering::Acquire -/// [`Release`]: std::sync::atomic::Ordering::Release -#[repr(transparent)] -pub struct AtomicCell { - /// The inner value. - /// - /// If this value can be transmuted into a primitive atomic type, it will be treated as such. - /// Otherwise, all potentially concurrent operations on this data will be protected by a global - /// lock. - /// - /// Using MaybeUninit to prevent code outside the cell from observing partially initialized state: - /// - /// (This rustc bug has been fixed in Rust 1.64.) - /// - /// Note: - /// - we'll never store uninitialized `T` due to our API only using initialized `T`. - /// - this `MaybeUninit` does *not* fix . - value: UnsafeCell>, -} - -unsafe impl Send for AtomicCell {} -unsafe impl Sync for AtomicCell {} - -impl UnwindSafe for AtomicCell {} -impl RefUnwindSafe for AtomicCell {} - -impl AtomicCell { - /// Creates a new atomic cell initialized with `val`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(7); - /// ``` - pub const fn new(val: T) -> Self { - Self { - value: UnsafeCell::new(MaybeUninit::new(val)), - } - } - - /// Consumes the atomic and returns the contained value. - /// - /// This is safe because passing `self` by value guarantees that no other threads are - /// concurrently accessing the atomic data. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(7); - /// let v = a.into_inner(); - /// - /// assert_eq!(v, 7); - /// ``` - pub fn into_inner(self) -> T { - let this = ManuallyDrop::new(self); - // SAFETY: - // - passing `self` by value guarantees that no other threads are concurrently - // accessing the atomic data - // - the raw pointer passed in is valid because we got it from an owned value. - // - `ManuallyDrop` prevents double dropping `T` - unsafe { this.as_ptr().read() } - } - - /// Returns `true` if operations on values of this type are lock-free. - /// - /// If the compiler or the platform doesn't support the necessary atomic instructions, - /// `AtomicCell` will use global locks for every potentially concurrent atomic operation. - /// - /// # Examples - /// - /// ``` - /// # // Always use fallback for now on environments that do not support inline assembly. - /// # if cfg!(any(miri, crossbeam_loom, crossbeam_atomic_cell_force_fallback)) { return; } - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// // This type is internally represented as `AtomicUsize` so we can just use atomic - /// // operations provided by it. - /// assert_eq!(AtomicCell::::is_lock_free(), true); - /// - /// // A wrapper struct around `isize`. - /// struct Foo { - /// bar: isize, - /// } - /// // `AtomicCell` will be internally represented as `AtomicIsize`. - /// assert_eq!(AtomicCell::::is_lock_free(), true); - /// - /// // Operations on zero-sized types are always lock-free. - /// assert_eq!(AtomicCell::<()>::is_lock_free(), true); - /// - /// // Very large types cannot be represented as any of the standard atomic types, so atomic - /// // operations on them will have to use global locks for synchronization. - /// assert_eq!(AtomicCell::<[u8; 1000]>::is_lock_free(), false); - /// ``` - pub const fn is_lock_free() -> bool { - atomic_is_lock_free::() - } - - /// Stores `val` into the atomic cell. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(7); - /// - /// assert_eq!(a.load(), 7); - /// a.store(8); - /// assert_eq!(a.load(), 8); - /// ``` - pub fn store(&self, val: T) { - if mem::needs_drop::() { - drop(self.swap(val)); - } else { - unsafe { - atomic_store(self.as_ptr(), val); - } - } - } - - /// Stores `val` into the atomic cell and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(7); - /// - /// assert_eq!(a.load(), 7); - /// assert_eq!(a.swap(8), 7); - /// assert_eq!(a.load(), 8); - /// ``` - pub fn swap(&self, val: T) -> T { - unsafe { atomic_swap(self.as_ptr(), val) } - } - - /// Returns a raw pointer to the underlying data in this atomic cell. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(5); - /// - /// let ptr = a.as_ptr(); - /// ``` - #[inline] - pub fn as_ptr(&self) -> *mut T { - self.value.get().cast::() - } -} - -impl AtomicCell { - /// Takes the value of the atomic cell, leaving `Default::default()` in its place. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(5); - /// let five = a.take(); - /// - /// assert_eq!(five, 5); - /// assert_eq!(a.into_inner(), 0); - /// ``` - pub fn take(&self) -> T { - self.swap(Default::default()) - } -} - -impl AtomicCell { - /// Loads a value from the atomic cell. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(7); - /// - /// assert_eq!(a.load(), 7); - /// ``` - pub fn load(&self) -> T { - unsafe { atomic_load(self.as_ptr()) } - } -} - -impl AtomicCell { - /// If the current value equals `current`, stores `new` into the atomic cell. 
- /// - /// The return value is a result indicating whether the new value was written and containing - /// the previous value. On success this value is guaranteed to be equal to `current`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(1); - /// - /// assert_eq!(a.compare_exchange(2, 3), Err(1)); - /// assert_eq!(a.load(), 1); - /// - /// assert_eq!(a.compare_exchange(1, 2), Ok(1)); - /// assert_eq!(a.load(), 2); - /// ``` - pub fn compare_exchange(&self, current: T, new: T) -> Result { - unsafe { atomic_compare_exchange_weak(self.as_ptr(), current, new) } - } - - /// Fetches the value, and applies a function to it that returns an optional - /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else - /// `Err(previous_value)`. - /// - /// Note: This may call the function multiple times if the value has been changed from other threads in - /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied - /// only once to the stored value. - /// - /// # Examples - /// - /// ```rust - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(7); - /// assert_eq!(a.fetch_update(|_| None), Err(7)); - /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(7)); - /// assert_eq!(a.fetch_update(|a| Some(a + 1)), Ok(8)); - /// assert_eq!(a.load(), 9); - /// ``` - #[inline] - pub fn fetch_update(&self, mut f: F) -> Result - where - F: FnMut(T) -> Option, - { - let mut prev = self.load(); - while let Some(next) = f(prev) { - match self.compare_exchange(prev, next) { - x @ Ok(_) => return x, - Err(next_prev) => prev = next_prev, - } - } - Err(prev) - } -} - -// `MaybeUninit` prevents `T` from being dropped, so we need to implement `Drop` -// for `AtomicCell` to avoid leaks of non-`Copy` types. 
-impl Drop for AtomicCell { - fn drop(&mut self) { - if mem::needs_drop::() { - // SAFETY: - // - the mutable reference guarantees that no other threads are concurrently accessing the atomic data - // - the raw pointer passed in is valid because we got it from a reference - // - `MaybeUninit` prevents double dropping `T` - unsafe { - self.as_ptr().drop_in_place(); - } - } - } -} - -macro_rules! atomic { - // If values of type `$t` can be transmuted into values of the primitive atomic type `$atomic`, - // declares variable `$a` of type `$atomic` and executes `$atomic_op`, breaking out of the loop. - (@check, $t:ty, $atomic:ty, $a:ident, $atomic_op:expr) => { - if can_transmute::<$t, $atomic>() { - let $a: &$atomic; - break $atomic_op; - } - }; - - // If values of type `$t` can be transmuted into values of a primitive atomic type, declares - // variable `$a` of that type and executes `$atomic_op`. Otherwise, just executes - // `$fallback_op`. - ($t:ty, $a:ident, $atomic_op:expr, $fallback_op:expr) => { - loop { - atomic!(@check, $t, AtomicUnit, $a, $atomic_op); - - // Always use fallback for now on environments that do not support inline assembly. - #[cfg(not(any( - miri, - crossbeam_loom, - crossbeam_atomic_cell_force_fallback, - )))] - atomic_maybe_uninit::cfg_has_atomic_cas! { - atomic_maybe_uninit::cfg_has_atomic_8! { - atomic!(@check, $t, atomic_maybe_uninit::AtomicMaybeUninit, $a, $atomic_op); - } - atomic_maybe_uninit::cfg_has_atomic_16! { - atomic!(@check, $t, atomic_maybe_uninit::AtomicMaybeUninit, $a, $atomic_op); - } - atomic_maybe_uninit::cfg_has_atomic_32! { - atomic!(@check, $t, atomic_maybe_uninit::AtomicMaybeUninit, $a, $atomic_op); - } - atomic_maybe_uninit::cfg_has_atomic_64! { - atomic!(@check, $t, atomic_maybe_uninit::AtomicMaybeUninit, $a, $atomic_op); - } - atomic_maybe_uninit::cfg_has_atomic_128! { - atomic!(@check, $t, atomic_maybe_uninit::AtomicMaybeUninit, $a, $atomic_op); - } - } - - break $fallback_op; - } - }; -} - -macro_rules! 
impl_arithmetic { - ($t:ty, fetch_update, $example:tt) => { - impl AtomicCell<$t> { - /// Increments the current value by `val` and returns the previous value. - /// - /// The addition wraps on overflow. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_add(3), 7); - /// assert_eq!(a.load(), 10); - /// ``` - #[inline] - pub fn fetch_add(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - self.fetch_update(|old| Some(old.wrapping_add(val))).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = value.wrapping_add(val); - old - } - } - } - - /// Decrements the current value by `val` and returns the previous value. - /// - /// The subtraction wraps on overflow. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_sub(3), 7); - /// assert_eq!(a.load(), 4); - /// ``` - #[inline] - pub fn fetch_sub(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - self.fetch_update(|old| Some(old.wrapping_sub(val))).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = value.wrapping_sub(val); - old - } - } - } - - /// Applies bitwise "and" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_and(3), 7); - /// assert_eq!(a.load(), 3); - /// ``` - #[inline] - pub fn fetch_and(&self, val: $t) -> $t { - atomic! 
{ - $t, _a, - { - self.fetch_update(|old| Some(old & val)).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value &= val; - old - } - } - } - - /// Applies bitwise "nand" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_nand(3), 7); - /// assert_eq!(a.load(), !(7 & 3)); - /// ``` - #[inline] - pub fn fetch_nand(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - self.fetch_update(|old| Some(!(old & val))).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = !(old & val); - old - } - } - } - - /// Applies bitwise "or" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_or(16), 7); - /// assert_eq!(a.load(), 23); - /// ``` - #[inline] - pub fn fetch_or(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - self.fetch_update(|old| Some(old | val)).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value |= val; - old - } - } - } - - /// Applies bitwise "xor" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_xor(2), 7); - /// assert_eq!(a.load(), 5); - /// ``` - #[inline] - pub fn fetch_xor(&self, val: $t) -> $t { - atomic! 
{ - $t, _a, - { - self.fetch_update(|old| Some(old ^ val)).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value ^= val; - old - } - } - } - - /// Compares and sets the maximum of the current value and `val`, - /// and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_max(2), 7); - /// assert_eq!(a.load(), 7); - /// ``` - #[inline] - pub fn fetch_max(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - self.fetch_update(|old| Some(cmp::max(old, val))).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = cmp::max(old, val); - old - } - } - } - - /// Compares and sets the minimum of the current value and `val`, - /// and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_min(2), 7); - /// assert_eq!(a.load(), 2); - /// ``` - #[inline] - pub fn fetch_min(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - self.fetch_update(|old| Some(cmp::min(old, val))).unwrap() - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = cmp::min(old, val); - old - } - } - } - } - }; - ($t:ty, $atomic:ident, $example:tt) => { - impl AtomicCell<$t> { - /// Increments the current value by `val` and returns the previous value. - /// - /// The addition wraps on overflow. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_add(3), 7); - /// assert_eq!(a.load(), 10); - /// ``` - #[inline] - pub fn fetch_add(&self, val: $t) -> $t { - atomic! 
{ - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_add(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = value.wrapping_add(val); - old - } - } - } - - /// Decrements the current value by `val` and returns the previous value. - /// - /// The subtraction wraps on overflow. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_sub(3), 7); - /// assert_eq!(a.load(), 4); - /// ``` - #[inline] - pub fn fetch_sub(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_sub(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = value.wrapping_sub(val); - old - } - } - } - - /// Applies bitwise "and" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_and(3), 7); - /// assert_eq!(a.load(), 3); - /// ``` - #[inline] - pub fn fetch_and(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_and(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value &= val; - old - } - } - } - - /// Applies bitwise "nand" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_nand(3), 7); - /// assert_eq!(a.load(), !(7 & 3)); - /// ``` - #[inline] - pub fn fetch_nand(&self, val: $t) -> $t { - atomic! 
{ - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_nand(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = !(old & val); - old - } - } - } - - /// Applies bitwise "or" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_or(16), 7); - /// assert_eq!(a.load(), 23); - /// ``` - #[inline] - pub fn fetch_or(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_or(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value |= val; - old - } - } - } - - /// Applies bitwise "xor" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_xor(2), 7); - /// assert_eq!(a.load(), 5); - /// ``` - #[inline] - pub fn fetch_xor(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_xor(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value ^= val; - old - } - } - } - - /// Compares and sets the maximum of the current value and `val`, - /// and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_max(9), 7); - /// assert_eq!(a.load(), 9); - /// ``` - #[inline] - pub fn fetch_max(&self, val: $t) -> $t { - atomic! 
{ - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_max(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = cmp::max(old, val); - old - } - } - } - - /// Compares and sets the minimum of the current value and `val`, - /// and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - #[doc = $example] - /// - /// assert_eq!(a.fetch_min(2), 7); - /// assert_eq!(a.load(), 2); - /// ``` - #[inline] - pub fn fetch_min(&self, val: $t) -> $t { - atomic! { - $t, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::$atomic) }; - a.fetch_min(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = cmp::min(old, val); - old - } - } - } - } - }; -} - -impl_arithmetic!(u8, AtomicU8, "let a = AtomicCell::new(7u8);"); -impl_arithmetic!(i8, AtomicI8, "let a = AtomicCell::new(7i8);"); -impl_arithmetic!(u16, AtomicU16, "let a = AtomicCell::new(7u16);"); -impl_arithmetic!(i16, AtomicI16, "let a = AtomicCell::new(7i16);"); - -#[cfg(target_has_atomic = "32")] -impl_arithmetic!(u32, AtomicU32, "let a = AtomicCell::new(7u32);"); -#[cfg(target_has_atomic = "32")] -impl_arithmetic!(i32, AtomicI32, "let a = AtomicCell::new(7i32);"); -#[cfg(not(target_has_atomic = "32"))] -impl_arithmetic!(u32, fetch_update, "let a = AtomicCell::new(7u32);"); -#[cfg(not(target_has_atomic = "32"))] -impl_arithmetic!(i32, fetch_update, "let a = AtomicCell::new(7i32);"); - -#[cfg(target_has_atomic = "64")] -impl_arithmetic!(u64, AtomicU64, "let a = AtomicCell::new(7u64);"); -#[cfg(target_has_atomic = "64")] -impl_arithmetic!(i64, AtomicI64, "let a = AtomicCell::new(7i64);"); -#[cfg(not(target_has_atomic = "64"))] -impl_arithmetic!(u64, fetch_update, "let a = AtomicCell::new(7u64);"); 
-#[cfg(not(target_has_atomic = "64"))] -impl_arithmetic!(i64, fetch_update, "let a = AtomicCell::new(7i64);"); - -// TODO: core::sync::atomic::AtomicU128 is unstable -// impl_arithmetic!(u128, AtomicU128, "let a = AtomicCell::new(7u128);"); -// impl_arithmetic!(i128, AtomicI128, "let a = AtomicCell::new(7i128);"); -impl_arithmetic!(u128, fetch_update, "let a = AtomicCell::new(7u128);"); -impl_arithmetic!(i128, fetch_update, "let a = AtomicCell::new(7i128);"); - -impl_arithmetic!(usize, AtomicUsize, "let a = AtomicCell::new(7usize);"); -impl_arithmetic!(isize, AtomicIsize, "let a = AtomicCell::new(7isize);"); - -impl AtomicCell { - /// Applies logical "and" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(true); - /// - /// assert_eq!(a.fetch_and(true), true); - /// assert_eq!(a.load(), true); - /// - /// assert_eq!(a.fetch_and(false), true); - /// assert_eq!(a.load(), false); - /// ``` - #[inline] - pub fn fetch_and(&self, val: bool) -> bool { - atomic! { - bool, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; - a.fetch_and(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value &= val; - old - } - } - } - - /// Applies logical "nand" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(true); - /// - /// assert_eq!(a.fetch_nand(false), true); - /// assert_eq!(a.load(), true); - /// - /// assert_eq!(a.fetch_nand(true), true); - /// assert_eq!(a.load(), false); - /// - /// assert_eq!(a.fetch_nand(false), false); - /// assert_eq!(a.load(), true); - /// ``` - #[inline] - pub fn fetch_nand(&self, val: bool) -> bool { - atomic! 
{ - bool, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; - a.fetch_nand(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value = !(old & val); - old - } - } - } - - /// Applies logical "or" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(false); - /// - /// assert_eq!(a.fetch_or(false), false); - /// assert_eq!(a.load(), false); - /// - /// assert_eq!(a.fetch_or(true), false); - /// assert_eq!(a.load(), true); - /// ``` - #[inline] - pub fn fetch_or(&self, val: bool) -> bool { - atomic! { - bool, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; - a.fetch_or(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value |= val; - old - } - } - } - - /// Applies logical "xor" to the current value and returns the previous value. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::atomic::AtomicCell; - /// - /// let a = AtomicCell::new(true); - /// - /// assert_eq!(a.fetch_xor(false), true); - /// assert_eq!(a.load(), true); - /// - /// assert_eq!(a.fetch_xor(true), true); - /// assert_eq!(a.load(), false); - /// ``` - #[inline] - pub fn fetch_xor(&self, val: bool) -> bool { - atomic! 
{ - bool, _a, - { - let a = unsafe { &*(self.as_ptr() as *const atomic::AtomicBool) }; - a.fetch_xor(val, Ordering::AcqRel) - }, - { - let _guard = lock(self.as_ptr() as usize).write(); - let value = unsafe { &mut *(self.as_ptr()) }; - let old = *value; - *value ^= val; - old - } - } - } -} - -impl Default for AtomicCell { - fn default() -> Self { - Self::new(T::default()) - } -} - -impl From for AtomicCell { - #[inline] - fn from(val: T) -> Self { - Self::new(val) - } -} - -impl fmt::Debug for AtomicCell { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AtomicCell") - .field("value", &self.load()) - .finish() - } -} - -/// Returns `true` if values of type `A` can be transmuted into values of type `B`. -const fn can_transmute() -> bool { - // Sizes must be equal, but alignment of `A` must be greater or equal than that of `B`. - (mem::size_of::
() == mem::size_of::()) & (mem::align_of::() >= mem::align_of::()) -} - -/// Returns a reference to the global lock associated with the `AtomicCell` at address `addr`. -/// -/// This function is used to protect atomic data which doesn't fit into any of the primitive atomic -/// types in `std::sync::atomic`. Operations on such atomics must therefore use a global lock. -/// -/// However, there is not only one global lock but an array of many locks, and one of them is -/// picked based on the given address. Having many locks reduces contention and improves -/// scalability. -#[inline] -#[must_use] -fn lock(addr: usize) -> &'static SeqLock { - // The number of locks is a prime number because we want to make sure `addr % LEN` gets - // dispersed across all locks. - // - // Note that addresses are always aligned to some power of 2, depending on type `T` in - // `AtomicCell`. If `LEN` was an even number, then `addr % LEN` would be an even number, - // too, which means only half of the locks would get utilized! - // - // It is also possible for addresses to accidentally get aligned to a number that is not a - // power of 2. Consider this example: - // - // ``` - // #[repr(C)] - // struct Foo { - // a: AtomicCell, - // b: u8, - // c: u8, - // } - // ``` - // - // Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets - // stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3. - // In order to protect from such cases, we simply choose a large prime number for `LEN`. - const LEN: usize = 67; - const L: CachePadded = CachePadded::new(SeqLock::new()); - static LOCKS: [CachePadded; LEN] = [L; LEN]; - - // If the modulus is a constant number, the compiler will use crazy math to transform this into - // a sequence of cheap arithmetic operations rather than using the slow modulo instruction. - &LOCKS[addr % LEN] -} - -/// An atomic `()`. -/// -/// All operations are noops. 
-struct AtomicUnit; - -impl AtomicUnit { - #[inline] - fn load(&self, _order: Ordering) {} - - #[inline] - fn store(&self, _val: (), _order: Ordering) {} - - #[inline] - fn swap(&self, _val: (), _order: Ordering) {} - - #[inline] - fn compare_exchange_weak( - &self, - _current: (), - _new: (), - _success: Ordering, - _failure: Ordering, - ) -> Result<(), ()> { - Ok(()) - } -} - -/// Returns `true` if operations on `AtomicCell` are lock-free. -const fn atomic_is_lock_free() -> bool { - atomic! { T, _a, true, false } -} - -/// Atomically reads data from `src`. -/// -/// This operation uses the `Acquire` ordering. If possible, an atomic instructions is used, and a -/// global lock otherwise. -unsafe fn atomic_load(src: *mut T) -> T -where - T: Copy, -{ - atomic! { - T, a, - { - a = unsafe { &*(src as *const _ as *const _) }; - unsafe { mem::transmute_copy(&a.load(Ordering::Acquire)) } - }, - { - let lock = lock(src as usize); - - // Try doing an optimistic read first. - if let Some(stamp) = lock.optimistic_read() { - // We need a volatile read here because other threads might concurrently modify the - // value. In theory, data races are *always* UB, even if we use volatile reads and - // discard the data when a data race is detected. The proper solution would be to - // do atomic reads and atomic writes, but we can't atomically read and write all - // kinds of data since `AtomicU8` is not available on stable Rust yet. - // Load as `MaybeUninit` because we may load a value that is not valid as `T`. - let val = unsafe { ptr::read_volatile(src.cast::>()) }; - - if lock.validate_read(stamp) { - return unsafe { val.assume_init() }; - } - } - - // Grab a regular write lock so that writers don't starve this load. - let guard = lock.write(); - let val = unsafe { ptr::read(src) }; - // The value hasn't been changed. Drop the guard without incrementing the stamp. - guard.abort(); - val - } - } -} - -/// Atomically writes `val` to `dst`. 
-/// -/// This operation uses the `Release` ordering. If possible, an atomic instructions is used, and a -/// global lock otherwise. -unsafe fn atomic_store(dst: *mut T, val: T) { - atomic! { - T, a, - { - a = unsafe { &*(dst as *const _ as *const _) }; - a.store(unsafe { mem::transmute_copy(&val) }, Ordering::Release); - mem::forget(val); - }, - { - let _guard = lock(dst as usize).write(); - unsafe { ptr::write(dst, val) } - } - } -} - -/// Atomically swaps data at `dst` with `val`. -/// -/// This operation uses the `AcqRel` ordering. If possible, an atomic instructions is used, and a -/// global lock otherwise. -unsafe fn atomic_swap(dst: *mut T, val: T) -> T { - atomic! { - T, a, - { - a = unsafe { &*(dst as *const _ as *const _) }; - let res = unsafe { mem::transmute_copy(&a.swap(mem::transmute_copy(&val), Ordering::AcqRel)) }; - mem::forget(val); - res - }, - { - let _guard = lock(dst as usize).write(); - unsafe { ptr::replace(dst, val) } - } - } -} - -/// Atomically compares data at `dst` to `current` and, if equal byte-for-byte, exchanges data at -/// `dst` with `new`. -/// -/// Returns the old value on success, or the current value at `dst` on failure. -/// -/// This operation uses the `AcqRel` ordering. If possible, an atomic instructions is used, and a -/// global lock otherwise. -#[allow(clippy::let_unit_value)] -unsafe fn atomic_compare_exchange_weak(dst: *mut T, mut current: T, new: T) -> Result -where - T: Copy + Eq, -{ - atomic! 
{ - T, a, - { - a = unsafe { &*(dst as *const _ as *const _) }; - let mut current_raw = unsafe { mem::transmute_copy(¤t) }; - let new_raw = unsafe { mem::transmute_copy(&new) }; - - loop { - match a.compare_exchange_weak( - current_raw, - new_raw, - Ordering::AcqRel, - Ordering::Acquire, - ) { - Ok(_) => break Ok(current), - Err(previous_raw) => { - let previous = unsafe { mem::transmute_copy(&previous_raw) }; - - if !T::eq(&previous, ¤t) { - break Err(previous); - } - - // The compare-exchange operation has failed and didn't store `new`. The - // failure is either spurious, or `previous` was semantically equal to - // `current` but not byte-equal. Let's retry with `previous` as the new - // `current`. - current = previous; - current_raw = previous_raw; - } - } - } - }, - { - let guard = lock(dst as usize).write(); - - let old = unsafe { ptr::read(dst) }; - if T::eq(&old, ¤t) { - unsafe { ptr::write(dst, new) } - Ok(old) - } else { - // The value hasn't been changed. Drop the guard without incrementing the stamp. - guard.abort(); - Err(old) - } - } - } -} diff --git a/crossbeam-utils/src/atomic/consume.rs b/crossbeam-utils/src/atomic/consume.rs deleted file mode 100644 index ff8e316b2..000000000 --- a/crossbeam-utils/src/atomic/consume.rs +++ /dev/null @@ -1,111 +0,0 @@ -#[cfg(not(crossbeam_no_atomic))] -use core::sync::atomic::Ordering; - -/// Trait which allows reading from primitive atomic types with "consume" ordering. -pub trait AtomicConsume { - /// Type returned by `load_consume`. - type Val; - - /// Loads a value from the atomic using a "consume" memory ordering. - /// - /// This is similar to the "acquire" ordering, except that an ordering is - /// only guaranteed with operations that "depend on" the result of the load. - /// However consume loads are usually much faster than acquire loads on - /// architectures with a weak memory model since they don't require memory - /// fence instructions. 
- /// - /// The exact definition of "depend on" is a bit vague, but it works as you - /// would expect in practice since a lot of software, especially the Linux - /// kernel, rely on this behavior. - /// - /// This is currently only implemented on ARM and AArch64, where a fence - /// can be avoided. On other architectures this will fall back to a simple - /// `load(Ordering::Acquire)`. - fn load_consume(&self) -> Self::Val; -} - -#[cfg(not(crossbeam_no_atomic))] -// Miri and Loom don't support "consume" ordering and ThreadSanitizer doesn't treat -// load(Relaxed) + compiler_fence(Acquire) as "consume" load. -// LLVM generates machine code equivalent to fence(Acquire) in compiler_fence(Acquire) -// on PowerPC, MIPS, etc. (https://godbolt.org/z/hffvjvW7h), so for now the fence -// can be actually avoided here only on ARM and AArch64. See also -// https://github.com/rust-lang/rust/issues/62256. -#[cfg(all( - any(target_arch = "arm", target_arch = "aarch64"), - not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)), -))] -macro_rules! impl_consume { - () => { - #[inline] - fn load_consume(&self) -> Self::Val { - use crate::primitive::sync::atomic::compiler_fence; - let result = self.load(Ordering::Relaxed); - compiler_fence(Ordering::Acquire); - result - } - }; -} - -#[cfg(not(crossbeam_no_atomic))] -#[cfg(not(all( - any(target_arch = "arm", target_arch = "aarch64"), - not(any(miri, crossbeam_loom, crossbeam_sanitize_thread)), -)))] -macro_rules! impl_consume { - () => { - #[inline] - fn load_consume(&self) -> Self::Val { - self.load(Ordering::Acquire) - } - }; -} - -macro_rules! 
impl_atomic { - ($atomic:ident, $val:ty) => { - #[cfg(not(crossbeam_no_atomic))] - impl AtomicConsume for core::sync::atomic::$atomic { - type Val = $val; - impl_consume!(); - } - #[cfg(crossbeam_loom)] - impl AtomicConsume for loom::sync::atomic::$atomic { - type Val = $val; - impl_consume!(); - } - }; -} - -impl_atomic!(AtomicBool, bool); -impl_atomic!(AtomicUsize, usize); -impl_atomic!(AtomicIsize, isize); -impl_atomic!(AtomicU8, u8); -impl_atomic!(AtomicI8, i8); -impl_atomic!(AtomicU16, u16); -impl_atomic!(AtomicI16, i16); -#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))] -impl_atomic!(AtomicU32, u32); -#[cfg(any(target_has_atomic = "32", not(target_pointer_width = "16")))] -impl_atomic!(AtomicI32, i32); -#[cfg(any( - target_has_atomic = "64", - not(any(target_pointer_width = "16", target_pointer_width = "32")), -))] -impl_atomic!(AtomicU64, u64); -#[cfg(any( - target_has_atomic = "64", - not(any(target_pointer_width = "16", target_pointer_width = "32")), -))] -impl_atomic!(AtomicI64, i64); - -#[cfg(not(crossbeam_no_atomic))] -impl AtomicConsume for core::sync::atomic::AtomicPtr { - type Val = *mut T; - impl_consume!(); -} - -#[cfg(crossbeam_loom)] -impl AtomicConsume for loom::sync::atomic::AtomicPtr { - type Val = *mut T; - impl_consume!(); -} diff --git a/crossbeam-utils/src/atomic/mod.rs b/crossbeam-utils/src/atomic/mod.rs deleted file mode 100644 index 8662ded56..000000000 --- a/crossbeam-utils/src/atomic/mod.rs +++ /dev/null @@ -1,32 +0,0 @@ -//! Atomic types. -//! -//! * [`AtomicCell`], a thread-safe mutable memory location. -//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering. - -#[cfg(target_has_atomic = "ptr")] -#[cfg(not(crossbeam_loom))] -// Use "wide" sequence lock if the pointer width <= 32 for preventing its counter against wrap -// around. -// -// In narrow architectures (pointer width <= 16), the counter is still <= 32-bit and may be -// vulnerable to wrap around. 
But it's mostly okay, since in such a primitive hardware, the -// counter will not be increased that fast. -// Note that Rust (and C99) pointers must be at least 16-bit (i.e., 8-bit targets are impossible): https://github.com/rust-lang/rust/pull/49305 -#[cfg_attr( - any(target_pointer_width = "16", target_pointer_width = "32"), - path = "seq_lock_wide.rs" -)] -mod seq_lock; - -#[cfg(target_has_atomic = "ptr")] -// We cannot provide AtomicCell under cfg(crossbeam_loom) because loom's atomic -// types have a different in-memory representation than the underlying type. -// TODO: The latest loom supports fences, so fallback using seqlock may be available. -#[cfg(not(crossbeam_loom))] -mod atomic_cell; -#[cfg(target_has_atomic = "ptr")] -#[cfg(not(crossbeam_loom))] -pub use atomic_cell::AtomicCell; - -mod consume; -pub use consume::AtomicConsume; diff --git a/crossbeam-utils/src/atomic/seq_lock.rs b/crossbeam-utils/src/atomic/seq_lock.rs deleted file mode 100644 index ff8defd26..000000000 --- a/crossbeam-utils/src/atomic/seq_lock.rs +++ /dev/null @@ -1,112 +0,0 @@ -use core::mem; -use core::sync::atomic::{self, AtomicUsize, Ordering}; - -use crate::Backoff; - -/// A simple stamped lock. -pub(crate) struct SeqLock { - /// The current state of the lock. - /// - /// All bits except the least significant one hold the current stamp. When locked, the state - /// equals 1 and doesn't contain a valid stamp. - state: AtomicUsize, -} - -impl SeqLock { - pub(crate) const fn new() -> Self { - Self { - state: AtomicUsize::new(0), - } - } - - /// If not locked, returns the current stamp. - /// - /// This method should be called before optimistic reads. - #[inline] - pub(crate) fn optimistic_read(&self) -> Option { - let state = self.state.load(Ordering::Acquire); - if state == 1 { - None - } else { - Some(state) - } - } - - /// Returns `true` if the current stamp is equal to `stamp`. - /// - /// This method should be called after optimistic reads to check whether they are valid. 
The - /// argument `stamp` should correspond to the one returned by method `optimistic_read`. - #[inline] - pub(crate) fn validate_read(&self, stamp: usize) -> bool { - atomic::fence(Ordering::Acquire); - self.state.load(Ordering::Relaxed) == stamp - } - - /// Grabs the lock for writing. - #[inline] - pub(crate) fn write(&'static self) -> SeqLockWriteGuard { - let backoff = Backoff::new(); - loop { - let previous = self.state.swap(1, Ordering::Acquire); - - if previous != 1 { - atomic::fence(Ordering::Release); - - return SeqLockWriteGuard { - lock: self, - state: previous, - }; - } - - backoff.snooze(); - } - } -} - -/// An RAII guard that releases the lock and increments the stamp when dropped. -pub(crate) struct SeqLockWriteGuard { - /// The parent lock. - lock: &'static SeqLock, - - /// The stamp before locking. - state: usize, -} - -impl SeqLockWriteGuard { - /// Releases the lock without incrementing the stamp. - #[inline] - pub(crate) fn abort(self) { - self.lock.state.store(self.state, Ordering::Release); - - // We specifically don't want to call drop(), since that's - // what increments the stamp. - mem::forget(self); - } -} - -impl Drop for SeqLockWriteGuard { - #[inline] - fn drop(&mut self) { - // Release the lock and increment the stamp. 
- self.lock - .state - .store(self.state.wrapping_add(2), Ordering::Release); - } -} - -#[cfg(test)] -mod tests { - use super::SeqLock; - - #[test] - fn test_abort() { - static LK: SeqLock = SeqLock::new(); - let before = LK.optimistic_read().unwrap(); - { - let guard = LK.write(); - guard.abort(); - } - let after = LK.optimistic_read().unwrap(); - assert_eq!(before, after, "aborted write does not update the stamp"); - } -} diff --git a/crossbeam-utils/src/atomic/seq_lock_wide.rs b/crossbeam-utils/src/atomic/seq_lock_wide.rs deleted file mode 100644 index ef5d94a45..000000000 --- a/crossbeam-utils/src/atomic/seq_lock_wide.rs +++ /dev/null @@ -1,155 +0,0 @@ -use core::mem; -use core::sync::atomic::{self, AtomicUsize, Ordering}; - -use crate::Backoff; - -/// A simple stamped lock. -/// -/// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low -/// bits. -pub(crate) struct SeqLock { - /// The high bits of the current state of the lock. - state_hi: AtomicUsize, - - /// The low bits of the current state of the lock. - /// - /// All bits except the least significant one hold the current stamp. When locked, the state_lo - /// equals 1 and doesn't contain a valid stamp. - state_lo: AtomicUsize, -} - -impl SeqLock { - pub(crate) const fn new() -> Self { - Self { - state_hi: AtomicUsize::new(0), - state_lo: AtomicUsize::new(0), - } - } - - /// If not locked, returns the current stamp. - /// - /// This method should be called before optimistic reads. - #[inline] - pub(crate) fn optimistic_read(&self) -> Option<(usize, usize)> { - // The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in - // `SeqLockWriteGuard::drop`. - // - // As a consequence, we can make sure that (1) all writes within the era of `state_hi - 1` - // happens before now; and therefore, (2) if `state_lo` is even, all writes within the - // critical section of (`state_hi`, `state_lo`) happens before now. 
- let state_hi = self.state_hi.load(Ordering::Acquire); - let state_lo = self.state_lo.load(Ordering::Acquire); - if state_lo == 1 { - None - } else { - Some((state_hi, state_lo)) - } - } - - /// Returns `true` if the current stamp is equal to `stamp`. - /// - /// This method should be called after optimistic reads to check whether they are valid. The - /// argument `stamp` should correspond to the one returned by method `optimistic_read`. - #[inline] - pub(crate) fn validate_read(&self, stamp: (usize, usize)) -> bool { - // Thanks to the fence, if we're noticing any modification to the data at the critical - // section of `(a, b)`, then the critical section's write of 1 to state_lo should be - // visible. - atomic::fence(Ordering::Acquire); - - // So if `state_lo` coincides with `stamp.1`, then either (1) we're noticing no modification - // to the data after the critical section of `(stamp.0, stamp.1)`, or (2) `state_lo` wrapped - // around. - // - // If (2) is the case, the acquire ordering ensures we see the new value of `state_hi`. - let state_lo = self.state_lo.load(Ordering::Acquire); - - // If (2) is the case and `state_hi` coincides with `stamp.0`, then `state_hi` also wrapped - // around, which we give up to correctly validate the read. - let state_hi = self.state_hi.load(Ordering::Relaxed); - - // Except for the case that both `state_hi` and `state_lo` wrapped around, the following - // condition implies that we're noticing no modification to the data after the critical - // section of `(stamp.0, stamp.1)`. - (state_hi, state_lo) == stamp - } - - /// Grabs the lock for writing. - #[inline] - pub(crate) fn write(&'static self) -> SeqLockWriteGuard { - let backoff = Backoff::new(); - loop { - let previous = self.state_lo.swap(1, Ordering::Acquire); - - if previous != 1 { - // To synchronize with the acquire fence in `validate_read` via any modification to - // the data at the critical section of `(state_hi, previous)`. 
- atomic::fence(Ordering::Release); - - return SeqLockWriteGuard { - lock: self, - state_lo: previous, - }; - } - - backoff.snooze(); - } - } -} - -/// An RAII guard that releases the lock and increments the stamp when dropped. -pub(crate) struct SeqLockWriteGuard { - /// The parent lock. - lock: &'static SeqLock, - - /// The stamp before locking. - state_lo: usize, -} - -impl SeqLockWriteGuard { - /// Releases the lock without incrementing the stamp. - #[inline] - pub(crate) fn abort(self) { - self.lock.state_lo.store(self.state_lo, Ordering::Release); - mem::forget(self); - } -} - -impl Drop for SeqLockWriteGuard { - #[inline] - fn drop(&mut self) { - let state_lo = self.state_lo.wrapping_add(2); - - // Increase the high bits if the low bits wrap around. - // - // Release ordering for synchronizing with `optimistic_read`. - if state_lo == 0 { - let state_hi = self.lock.state_hi.load(Ordering::Relaxed); - self.lock - .state_hi - .store(state_hi.wrapping_add(1), Ordering::Release); - } - - // Release the lock and increment the stamp. - // - // Release ordering for synchronizing with `optimistic_read`. - self.lock.state_lo.store(state_lo, Ordering::Release); - } -} - -#[cfg(test)] -mod tests { - use super::SeqLock; - - #[test] - fn test_abort() { - static LK: SeqLock = SeqLock::new(); - let before = LK.optimistic_read().unwrap(); - { - let guard = LK.write(); - guard.abort(); - } - let after = LK.optimistic_read().unwrap(); - assert_eq!(before, after, "aborted write does not update the stamp"); - } -} diff --git a/crossbeam-utils/src/backoff.rs b/crossbeam-utils/src/backoff.rs deleted file mode 100644 index 9729ce695..000000000 --- a/crossbeam-utils/src/backoff.rs +++ /dev/null @@ -1,289 +0,0 @@ -use crate::primitive::hint; -use core::cell::Cell; -use core::fmt; - -const SPIN_LIMIT: u32 = 6; -const YIELD_LIMIT: u32 = 10; - -/// Performs exponential backoff in spin loops. -/// -/// Backing off in spin loops reduces contention and improves overall performance. 
-/// -/// This primitive can execute *YIELD* and *PAUSE* instructions, yield the current thread to the OS -/// scheduler, and tell when is a good time to block the thread using a different synchronization -/// mechanism. Each step of the back off procedure takes roughly twice as long as the previous -/// step. -/// -/// # Examples -/// -/// Backing off in a lock-free loop: -/// -/// ``` -/// use crossbeam_utils::Backoff; -/// use std::sync::atomic::AtomicUsize; -/// use std::sync::atomic::Ordering::SeqCst; -/// -/// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize { -/// let backoff = Backoff::new(); -/// loop { -/// let val = a.load(SeqCst); -/// if a.compare_exchange(val, val.wrapping_mul(b), SeqCst, SeqCst).is_ok() { -/// return val; -/// } -/// backoff.spin(); -/// } -/// } -/// ``` -/// -/// Waiting for an [`AtomicBool`] to become `true`: -/// -/// ``` -/// use crossbeam_utils::Backoff; -/// use std::sync::atomic::AtomicBool; -/// use std::sync::atomic::Ordering::SeqCst; -/// -/// fn spin_wait(ready: &AtomicBool) { -/// let backoff = Backoff::new(); -/// while !ready.load(SeqCst) { -/// backoff.snooze(); -/// } -/// } -/// ``` -/// -/// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait. 
-/// Note that whoever sets the atomic variable to `true` must notify the parked thread by calling -/// [`unpark()`]: -/// -/// ``` -/// use crossbeam_utils::Backoff; -/// use std::sync::atomic::AtomicBool; -/// use std::sync::atomic::Ordering::SeqCst; -/// use std::thread; -/// -/// fn blocking_wait(ready: &AtomicBool) { -/// let backoff = Backoff::new(); -/// while !ready.load(SeqCst) { -/// if backoff.is_completed() { -/// thread::park(); -/// } else { -/// backoff.snooze(); -/// } -/// } -/// } -/// ``` -/// -/// [`is_completed`]: Backoff::is_completed -/// [`std::thread::park()`]: std::thread::park -/// [`Condvar`]: std::sync::Condvar -/// [`AtomicBool`]: std::sync::atomic::AtomicBool -/// [`unpark()`]: std::thread::Thread::unpark -pub struct Backoff { - step: Cell, -} - -impl Backoff { - /// Creates a new `Backoff`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::Backoff; - /// - /// let backoff = Backoff::new(); - /// ``` - #[inline] - pub fn new() -> Self { - Self { step: Cell::new(0) } - } - - /// Resets the `Backoff`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::Backoff; - /// - /// let backoff = Backoff::new(); - /// backoff.reset(); - /// ``` - #[inline] - pub fn reset(&self) { - self.step.set(0); - } - - /// Backs off in a lock-free loop. - /// - /// This method should be used when we need to retry an operation because another thread made - /// progress. - /// - /// The processor may yield using the *YIELD* or *PAUSE* instruction. 
- /// - /// # Examples - /// - /// Backing off in a lock-free loop: - /// - /// ``` - /// use crossbeam_utils::Backoff; - /// use std::sync::atomic::AtomicUsize; - /// use std::sync::atomic::Ordering::SeqCst; - /// - /// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize { - /// let backoff = Backoff::new(); - /// loop { - /// let val = a.load(SeqCst); - /// if a.compare_exchange(val, val.wrapping_mul(b), SeqCst, SeqCst).is_ok() { - /// return val; - /// } - /// backoff.spin(); - /// } - /// } - /// - /// let a = AtomicUsize::new(7); - /// assert_eq!(fetch_mul(&a, 8), 7); - /// assert_eq!(a.load(SeqCst), 56); - /// ``` - #[inline] - pub fn spin(&self) { - for _ in 0..1 << self.step.get().min(SPIN_LIMIT) { - hint::spin_loop(); - } - - if self.step.get() <= SPIN_LIMIT { - self.step.set(self.step.get() + 1); - } - } - - /// Backs off in a blocking loop. - /// - /// This method should be used when we need to wait for another thread to make progress. - /// - /// The processor may yield using the *YIELD* or *PAUSE* instruction and the current thread - /// may yield by giving up a timeslice to the OS scheduler. - /// - /// In `#[no_std]` environments, this method is equivalent to [`spin`]. - /// - /// If possible, use [`is_completed`] to check when it is advised to stop using backoff and - /// block the current thread using a different synchronization mechanism instead. 
- /// - /// [`spin`]: Backoff::spin - /// [`is_completed`]: Backoff::is_completed - /// - /// # Examples - /// - /// Waiting for an [`AtomicBool`] to become `true`: - /// - /// ``` - /// use crossbeam_utils::Backoff; - /// use std::sync::Arc; - /// use std::sync::atomic::AtomicBool; - /// use std::sync::atomic::Ordering::SeqCst; - /// use std::thread; - /// use std::time::Duration; - /// - /// fn spin_wait(ready: &AtomicBool) { - /// let backoff = Backoff::new(); - /// while !ready.load(SeqCst) { - /// backoff.snooze(); - /// } - /// } - /// - /// let ready = Arc::new(AtomicBool::new(false)); - /// let ready2 = ready.clone(); - /// - /// # let t = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_millis(100)); - /// ready2.store(true, SeqCst); - /// }); - /// - /// assert_eq!(ready.load(SeqCst), false); - /// spin_wait(&ready); - /// assert_eq!(ready.load(SeqCst), true); - /// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - /// - /// [`AtomicBool`]: std::sync::atomic::AtomicBool - #[inline] - pub fn snooze(&self) { - if self.step.get() <= SPIN_LIMIT { - for _ in 0..1 << self.step.get() { - hint::spin_loop(); - } - } else { - #[cfg(not(feature = "std"))] - for _ in 0..1 << self.step.get() { - hint::spin_loop(); - } - - #[cfg(feature = "std")] - ::std::thread::yield_now(); - } - - if self.step.get() <= YIELD_LIMIT { - self.step.set(self.step.get() + 1); - } - } - - /// Returns `true` if exponential backoff has completed and blocking the thread is advised. 
- /// - /// # Examples - /// - /// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait: - /// - /// ``` - /// use crossbeam_utils::Backoff; - /// use std::sync::Arc; - /// use std::sync::atomic::AtomicBool; - /// use std::sync::atomic::Ordering::SeqCst; - /// use std::thread; - /// use std::time::Duration; - /// - /// fn blocking_wait(ready: &AtomicBool) { - /// let backoff = Backoff::new(); - /// while !ready.load(SeqCst) { - /// if backoff.is_completed() { - /// thread::park(); - /// } else { - /// backoff.snooze(); - /// } - /// } - /// } - /// - /// let ready = Arc::new(AtomicBool::new(false)); - /// let ready2 = ready.clone(); - /// let waiter = thread::current(); - /// - /// # let t = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_millis(100)); - /// ready2.store(true, SeqCst); - /// waiter.unpark(); - /// }); - /// - /// assert_eq!(ready.load(SeqCst), false); - /// blocking_wait(&ready); - /// assert_eq!(ready.load(SeqCst), true); - /// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - /// - /// [`AtomicBool`]: std::sync::atomic::AtomicBool - #[inline] - pub fn is_completed(&self) -> bool { - self.step.get() > YIELD_LIMIT - } -} - -impl fmt::Debug for Backoff { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Backoff") - .field("step", &self.step) - .field("is_completed", &self.is_completed()) - .finish() - } -} - -impl Default for Backoff { - fn default() -> Self { - Self::new() - } -} diff --git a/crossbeam-utils/src/cache_padded.rs b/crossbeam-utils/src/cache_padded.rs deleted file mode 100644 index 50cf7c0e0..000000000 --- a/crossbeam-utils/src/cache_padded.rs +++ /dev/null @@ -1,217 +0,0 @@ -use core::fmt; -use core::ops::{Deref, DerefMut}; - -/// Pads and aligns a value to the length of a cache line. 
-/// -/// In concurrent programming, sometimes it is desirable to make sure commonly accessed pieces of -/// data are not placed into the same cache line. Updating an atomic value invalidates the whole -/// cache line it belongs to, which makes the next access to the same cache line slower for other -/// CPU cores. Use `CachePadded` to ensure updating one piece of data doesn't invalidate other -/// cached data. -/// -/// # Size and alignment -/// -/// Cache lines are assumed to be N bytes long, depending on the architecture: -/// -/// * On x86-64, aarch64, and powerpc64, N = 128. -/// * On arm, mips, mips64, sparc, and hexagon, N = 32. -/// * On m68k, N = 16. -/// * On s390x, N = 256. -/// * On all others, N = 64. -/// -/// Note that N is just a reasonable guess and is not guaranteed to match the actual cache line -/// length of the machine the program is running on. On modern Intel architectures, spatial -/// prefetcher is pulling pairs of 64-byte cache lines at a time, so we pessimistically assume that -/// cache lines are 128 bytes long. -/// -/// The size of `CachePadded` is the smallest multiple of N bytes large enough to accommodate -/// a value of type `T`. -/// -/// The alignment of `CachePadded` is the maximum of N bytes and the alignment of `T`. 
-/// -/// # Examples -/// -/// Alignment and padding: -/// -/// ``` -/// use crossbeam_utils::CachePadded; -/// -/// let array = [CachePadded::new(1i8), CachePadded::new(2i8)]; -/// let addr1 = &*array[0] as *const i8 as usize; -/// let addr2 = &*array[1] as *const i8 as usize; -/// -/// assert!(addr2 - addr1 >= 32); -/// assert_eq!(addr1 % 32, 0); -/// assert_eq!(addr2 % 32, 0); -/// ``` -/// -/// When building a concurrent queue with a head and a tail index, it is wise to place them in -/// different cache lines so that concurrent threads pushing and popping elements don't invalidate -/// each other's cache lines: -/// -/// ``` -/// use crossbeam_utils::CachePadded; -/// use std::sync::atomic::AtomicUsize; -/// -/// struct Queue { -/// head: CachePadded, -/// tail: CachePadded, -/// buffer: *mut T, -/// } -/// ``` -#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)] -// Starting from Intel's Sandy Bridge, spatial prefetcher is now pulling pairs of 64-byte cache -// lines at a time, so we have to align to 128 bytes rather than 64. -// -// Sources: -// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf -// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107 -// -// aarch64/arm64ec's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size. -// -// Sources: -// - https://www.mono-project.com/news/2016/09/12/arm64-icache/ -// -// powerpc64 has 128-byte cache line size. 
-// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/powerpc/include/asm/cache.h#L26 -#[cfg_attr( - any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "arm64ec", - target_arch = "powerpc64", - ), - repr(align(128)) -)] -// arm, mips, mips64, sparc, and hexagon have 32-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L17 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/hexagon/include/asm/cache.h#L12 -#[cfg_attr( - any( - target_arch = "arm", - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "mips64", - target_arch = "mips64r6", - target_arch = "sparc", - target_arch = "hexagon", - ), - repr(align(32)) -)] -// m68k has 16-byte cache line size. -// -// Sources: -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/m68k/include/asm/cache.h#L9 -#[cfg_attr(target_arch = "m68k", repr(align(16)))] -// s390x has 256-byte cache line size. 
-// -// Sources: -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/s390/include/asm/cache.h#L13 -#[cfg_attr(target_arch = "s390x", repr(align(256)))] -// x86, wasm, riscv, and sparc64 have 64-byte cache line size. -// -// Sources: -// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9 -// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/riscv/include/asm/cache.h#L10 -// - https://github.com/torvalds/linux/blob/3516bd729358a2a9b090c1905bd2a3fa926e24c6/arch/sparc/include/asm/cache.h#L19 -// -// All others are assumed to have 64-byte cache line size. -#[cfg_attr( - not(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "arm64ec", - target_arch = "powerpc64", - target_arch = "arm", - target_arch = "mips", - target_arch = "mips32r6", - target_arch = "mips64", - target_arch = "mips64r6", - target_arch = "sparc", - target_arch = "hexagon", - target_arch = "m68k", - target_arch = "s390x", - )), - repr(align(64)) -)] -pub struct CachePadded { - value: T, -} - -unsafe impl Send for CachePadded {} -unsafe impl Sync for CachePadded {} - -impl CachePadded { - /// Pads and aligns a value to the length of a cache line. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::CachePadded; - /// - /// let padded_value = CachePadded::new(1); - /// ``` - pub const fn new(t: T) -> Self { - Self { value: t } - } - - /// Returns the inner value. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::CachePadded; - /// - /// let padded_value = CachePadded::new(7); - /// let value = padded_value.into_inner(); - /// assert_eq!(value, 7); - /// ``` - pub fn into_inner(self) -> T { - self.value - } -} - -impl Deref for CachePadded { - type Target = T; - - fn deref(&self) -> &T { - &self.value - } -} - -impl DerefMut for CachePadded { - fn deref_mut(&mut self) -> &mut T { - &mut self.value - } -} - -impl fmt::Debug for CachePadded { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("CachePadded") - .field("value", &self.value) - .finish() - } -} - -impl From for CachePadded { - fn from(t: T) -> Self { - Self::new(t) - } -} - -impl fmt::Display for CachePadded { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.value, f) - } -} diff --git a/crossbeam-utils/src/lib.rs b/crossbeam-utils/src/lib.rs deleted file mode 100644 index 173678e19..000000000 --- a/crossbeam-utils/src/lib.rs +++ /dev/null @@ -1,92 +0,0 @@ -//! Miscellaneous tools for concurrent programming. -//! -//! ## Atomics -//! -//! * [`AtomicCell`], a thread-safe mutable memory location. -//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering. -//! -//! ## Thread synchronization -//! -//! * [`Parker`], a thread parking primitive. -//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. -//! * [`WaitGroup`], for synchronizing the beginning or end of some computation. -//! -//! ## Utilities -//! -//! * [`Backoff`], for exponential backoff in spin loops. -//! * [`CachePadded`], for padding and aligning a value to the length of a cache line. -//! * [`scope`], for spawning threads that borrow local variables from the stack. -//! -//! [`AtomicCell`]: atomic::AtomicCell -//! [`AtomicConsume`]: atomic::AtomicConsume -//! [`Parker`]: sync::Parker -//! [`ShardedLock`]: sync::ShardedLock -//! [`WaitGroup`]: sync::WaitGroup -//! 
[`scope`]: thread::scope - -#![no_std] -#![doc(test( - no_crate_inject, - attr( - deny(warnings, rust_2018_idioms, single_use_lifetimes), - allow(dead_code, unused_assignments, unused_variables) - ) -))] -#![warn(missing_docs, unsafe_op_in_unsafe_fn)] -#![cfg_attr(docsrs, feature(doc_cfg))] - -#[cfg(feature = "std")] -extern crate std; - -#[cfg(crossbeam_loom)] -#[allow(unused_imports)] -mod primitive { - pub(crate) mod hint { - pub(crate) use loom::hint::spin_loop; - } - pub(crate) mod sync { - pub(crate) mod atomic { - pub(crate) use loom::sync::atomic::{ - AtomicBool, AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, - AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering, - }; - - // FIXME: loom does not support compiler_fence at the moment. - // https://github.com/tokio-rs/loom/issues/117 - // we use fence as a stand-in for compiler_fence for the time being. - // this may miss some races since fence is stronger than compiler_fence, - // but it's the best we can do for the time being. - pub(crate) use loom::sync::atomic::fence as compiler_fence; - } - pub(crate) use loom::sync::{Arc, Condvar, Mutex}; - } -} -#[cfg(not(crossbeam_loom))] -#[allow(unused_imports)] -mod primitive { - pub(crate) mod hint { - pub(crate) use core::hint::spin_loop; - } - pub(crate) mod sync { - pub(crate) use core::sync::atomic; - #[cfg(feature = "std")] - pub(crate) use std::sync::{Arc, Condvar, Mutex}; - } -} - -#[cfg(feature = "atomic")] -#[cfg_attr(docsrs, doc(cfg(feature = "atomic")))] -pub mod atomic; - -mod cache_padded; -pub use crate::cache_padded::CachePadded; - -mod backoff; -pub use crate::backoff::Backoff; - -#[cfg(feature = "std")] -pub mod sync; - -#[cfg(feature = "std")] -#[cfg(not(crossbeam_loom))] -pub mod thread; diff --git a/crossbeam-utils/src/sync/mod.rs b/crossbeam-utils/src/sync/mod.rs deleted file mode 100644 index ed5542117..000000000 --- a/crossbeam-utils/src/sync/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Thread synchronization primitives. 
-//! -//! * [`Parker`], a thread parking primitive. -//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. -//! * [`WaitGroup`], for synchronizing the beginning or end of some computation. - -#[cfg(not(crossbeam_loom))] -mod once_lock; -mod parker; -#[cfg(not(crossbeam_loom))] -mod sharded_lock; -mod wait_group; - -pub use self::parker::{Parker, UnparkReason, Unparker}; -#[cfg(not(crossbeam_loom))] -pub use self::sharded_lock::{ShardedLock, ShardedLockReadGuard, ShardedLockWriteGuard}; -pub use self::wait_group::WaitGroup; diff --git a/crossbeam-utils/src/sync/once_lock.rs b/crossbeam-utils/src/sync/once_lock.rs deleted file mode 100644 index 7c1af6938..000000000 --- a/crossbeam-utils/src/sync/once_lock.rs +++ /dev/null @@ -1,90 +0,0 @@ -// Based on unstable std::sync::OnceLock. -// -// Source: https://github.com/rust-lang/rust/blob/8e9c93df464b7ada3fc7a1c8ccddd9dcb24ee0a0/library/std/src/sync/once_lock.rs - -use core::cell::UnsafeCell; -use core::mem::MaybeUninit; -use std::sync::Once; - -pub(crate) struct OnceLock { - once: Once, - value: UnsafeCell>, - // Unlike std::sync::OnceLock, we don't need PhantomData here because - // we don't use #[may_dangle]. -} - -unsafe impl Sync for OnceLock {} -unsafe impl Send for OnceLock {} - -impl OnceLock { - /// Creates a new empty cell. - #[must_use] - pub(crate) const fn new() -> Self { - Self { - once: Once::new(), - value: UnsafeCell::new(MaybeUninit::uninit()), - } - } - - /// Gets the contents of the cell, initializing it with `f` if the cell - /// was empty. - /// - /// Many threads may call `get_or_init` concurrently with different - /// initializing functions, but it is guaranteed that only one function - /// will be executed. - /// - /// # Panics - /// - /// If `f` panics, the panic is propagated to the caller, and the cell - /// remains uninitialized. - /// - /// It is an error to reentrantly initialize the cell from `f`. The - /// exact outcome is unspecified. 
Current implementation deadlocks, but - /// this may be changed to a panic in the future. - pub(crate) fn get_or_init(&self, f: F) -> &T - where - F: FnOnce() -> T, - { - // Fast path check - if self.once.is_completed() { - // SAFETY: The inner value has been initialized - return unsafe { self.get_unchecked() }; - } - self.initialize(f); - - // SAFETY: The inner value has been initialized - unsafe { self.get_unchecked() } - } - - #[cold] - fn initialize(&self, f: F) - where - F: FnOnce() -> T, - { - let slot = self.value.get(); - - self.once.call_once(|| { - let value = f(); - unsafe { slot.write(MaybeUninit::new(value)) } - }); - } - - /// # Safety - /// - /// The value must be initialized - unsafe fn get_unchecked(&self) -> &T { - debug_assert!(self.once.is_completed()); - unsafe { (*self.value.get()).assume_init_ref() } - } -} - -impl Drop for OnceLock { - fn drop(&mut self) { - if self.once.is_completed() { - // SAFETY: The inner value has been initialized - // assume_init_drop requires Rust 1.60 - // unsafe { (*self.value.get()).assume_init_drop() }; - unsafe { self.value.get().cast::().drop_in_place() }; - } - } -} diff --git a/crossbeam-utils/src/sync/parker.rs b/crossbeam-utils/src/sync/parker.rs deleted file mode 100644 index 82d48e151..000000000 --- a/crossbeam-utils/src/sync/parker.rs +++ /dev/null @@ -1,433 +0,0 @@ -use crate::primitive::sync::atomic::{AtomicUsize, Ordering::SeqCst}; -use crate::primitive::sync::{Arc, Condvar, Mutex}; -use std::fmt; -use std::marker::PhantomData; -use std::time::{Duration, Instant}; - -/// A thread parking primitive. -/// -/// Conceptually, each `Parker` has an associated token which is initially not present: -/// -/// * The [`park`] method blocks the current thread unless or until the token is available, at -/// which point it automatically consumes the token. -/// -/// * The [`park_timeout`] and [`park_deadline`] methods work the same as [`park`], but block for -/// a specified maximum time. 
-/// -/// * The [`unpark`] method atomically makes the token available if it wasn't already. Because the -/// token is initially absent, [`unpark`] followed by [`park`] will result in the second call -/// returning immediately. -/// -/// In other words, each `Parker` acts a bit like a spinlock that can be locked and unlocked using -/// [`park`] and [`unpark`]. -/// -/// # Examples -/// -/// ``` -/// use std::thread; -/// use std::time::Duration; -/// use crossbeam_utils::sync::Parker; -/// -/// let p = Parker::new(); -/// let u = p.unparker().clone(); -/// -/// // Make the token available. -/// u.unpark(); -/// // Wakes up immediately and consumes the token. -/// p.park(); -/// -/// # let t = -/// thread::spawn(move || { -/// thread::sleep(Duration::from_millis(500)); -/// u.unpark(); -/// }); -/// -/// // Wakes up when `u.unpark()` provides the token. -/// p.park(); -/// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -/// ``` -/// -/// [`park`]: Parker::park -/// [`park_timeout`]: Parker::park_timeout -/// [`park_deadline`]: Parker::park_deadline -/// [`unpark`]: Unparker::unpark -pub struct Parker { - unparker: Unparker, - _marker: PhantomData<*const ()>, -} - -unsafe impl Send for Parker {} - -impl Default for Parker { - fn default() -> Self { - Self { - unparker: Unparker { - inner: Arc::new(Inner { - state: AtomicUsize::new(EMPTY), - lock: Mutex::new(()), - cvar: Condvar::new(), - }), - }, - _marker: PhantomData, - } - } -} - -impl Parker { - /// Creates a new `Parker`. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// ``` - /// - pub fn new() -> Self { - Self::default() - } - - /// Blocks the current thread until the token is made available. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// let u = p.unparker().clone(); - /// - /// // Make the token available. 
- /// u.unpark(); - /// - /// // Wakes up immediately and consumes the token. - /// p.park(); - /// ``` - pub fn park(&self) { - self.unparker.inner.park(None); - } - - /// Blocks the current thread until the token is made available, but only for a limited time. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// - /// // Waits for the token to become available, but will not wait longer than 500 ms. - /// p.park_timeout(Duration::from_millis(500)); - /// ``` - pub fn park_timeout(&self, timeout: Duration) -> UnparkReason { - match Instant::now().checked_add(timeout) { - Some(deadline) => self.park_deadline(deadline), - None => { - self.park(); - UnparkReason::Unparked - } - } - } - - /// Blocks the current thread until the token is made available, or until a certain deadline. - /// - /// # Examples - /// - /// ``` - /// use std::time::{Duration, Instant}; - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// let deadline = Instant::now() + Duration::from_millis(500); - /// - /// // Waits for the token to become available, but will not wait longer than 500 ms. - /// p.park_deadline(deadline); - /// ``` - pub fn park_deadline(&self, deadline: Instant) -> UnparkReason { - self.unparker.inner.park(Some(deadline)) - } - - /// Returns a reference to an associated [`Unparker`]. - /// - /// The returned [`Unparker`] doesn't have to be used by reference - it can also be cloned. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// let u = p.unparker().clone(); - /// - /// // Make the token available. - /// u.unpark(); - /// // Wakes up immediately and consumes the token. - /// p.park(); - /// ``` - /// - /// [`park`]: Parker::park - /// [`park_timeout`]: Parker::park_timeout - pub fn unparker(&self) -> &Unparker { - &self.unparker - } - - /// Converts a `Parker` into a raw pointer. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// let raw = Parker::into_raw(p); - /// # let _ = unsafe { Parker::from_raw(raw) }; - /// ``` - pub fn into_raw(this: Self) -> *const () { - Unparker::into_raw(this.unparker) - } - - /// Converts a raw pointer into a `Parker`. - /// - /// # Safety - /// - /// This method is safe to use only with pointers returned by [`Parker::into_raw`]. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// let raw = Parker::into_raw(p); - /// let p = unsafe { Parker::from_raw(raw) }; - /// ``` - pub unsafe fn from_raw(ptr: *const ()) -> Self { - Self { - unparker: unsafe { Unparker::from_raw(ptr) }, - _marker: PhantomData, - } - } -} - -impl fmt::Debug for Parker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Parker { .. }") - } -} - -/// Unparks a thread parked by the associated [`Parker`]. -pub struct Unparker { - inner: Arc, -} - -unsafe impl Send for Unparker {} -unsafe impl Sync for Unparker {} - -impl Unparker { - /// Atomically makes the token available if it is not already. - /// - /// This method will wake up the thread blocked on [`park`] or [`park_timeout`], if there is - /// any. - /// - /// # Examples - /// - /// ``` - /// use std::thread; - /// use std::time::Duration; - /// use crossbeam_utils::sync::Parker; - /// - /// let p = Parker::new(); - /// let u = p.unparker().clone(); - /// - /// # let t = - /// thread::spawn(move || { - /// thread::sleep(Duration::from_millis(500)); - /// u.unpark(); - /// }); - /// - /// // Wakes up when `u.unpark()` provides the token. 
- /// p.park(); - /// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - /// - /// [`park`]: Parker::park - /// [`park_timeout`]: Parker::park_timeout - pub fn unpark(&self) { - self.inner.unpark() - } - - /// Converts an `Unparker` into a raw pointer. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::{Parker, Unparker}; - /// - /// let p = Parker::new(); - /// let u = p.unparker().clone(); - /// let raw = Unparker::into_raw(u); - /// # let _ = unsafe { Unparker::from_raw(raw) }; - /// ``` - pub fn into_raw(this: Self) -> *const () { - Arc::into_raw(this.inner).cast::<()>() - } - - /// Converts a raw pointer into an `Unparker`. - /// - /// # Safety - /// - /// This method is safe to use only with pointers returned by [`Unparker::into_raw`]. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::{Parker, Unparker}; - /// - /// let p = Parker::new(); - /// let u = p.unparker().clone(); - /// - /// let raw = Unparker::into_raw(u); - /// let u = unsafe { Unparker::from_raw(raw) }; - /// ``` - pub unsafe fn from_raw(ptr: *const ()) -> Self { - Self { - inner: unsafe { Arc::from_raw(ptr.cast::()) }, - } - } -} - -impl fmt::Debug for Unparker { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Unparker { .. }") - } -} - -impl Clone for Unparker { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -/// An enum that reports whether a `Parker::park_timeout` or -/// `Parker::park_deadline` returned because another thread called `unpark` or -/// because of a timeout. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum UnparkReason { - /// The park method returned due to a call to `unpark`. - Unparked, - - /// The park method returned due to a timeout. 
- Timeout, -} - -const EMPTY: usize = 0; -const PARKED: usize = 1; -const NOTIFIED: usize = 2; - -struct Inner { - state: AtomicUsize, - lock: Mutex<()>, - cvar: Condvar, -} - -impl Inner { - fn park(&self, deadline: Option) -> UnparkReason { - // If we were previously notified then we consume this notification and return quickly. - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - return UnparkReason::Unparked; - } - - // If the timeout is zero, then there is no need to actually block. - if let Some(deadline) = deadline { - if deadline <= Instant::now() { - return UnparkReason::Timeout; - } - } - - // Otherwise we need to coordinate going to sleep. - let mut m = self.lock.lock().unwrap(); - - match self.state.compare_exchange(EMPTY, PARKED, SeqCst, SeqCst) { - Ok(_) => {} - // Consume this notification to avoid spurious wakeups in the next park. - Err(NOTIFIED) => { - // We must read `state` here, even though we know it will be `NOTIFIED`. This is - // because `unpark` may have been called again since we read `NOTIFIED` in the - // `compare_exchange` above. We must perform an acquire operation that synchronizes - // with that `unpark` to observe any writes it made before the call to `unpark`. To - // do that we must read from the write it made to `state`. - let old = self.state.swap(EMPTY, SeqCst); - assert_eq!(old, NOTIFIED, "park state changed unexpectedly"); - return UnparkReason::Unparked; - } - Err(n) => panic!("inconsistent park_timeout state: {}", n), - } - - loop { - // Block the current thread on the conditional variable. - m = match deadline { - None => self.cvar.wait(m).unwrap(), - Some(deadline) => { - let now = Instant::now(); - if now < deadline { - // We could check for a timeout here, in the return value of wait_timeout, - // but in the case that a timeout and an unpark arrive simultaneously, we - // prefer to report the former. 
- self.cvar.wait_timeout(m, deadline - now).unwrap().0 - } else { - // We've timed out; swap out the state back to empty on our way out - return match self.state.swap(EMPTY, SeqCst) { - NOTIFIED => UnparkReason::Unparked, // got a notification - PARKED => UnparkReason::Timeout, // no notification - n => panic!("inconsistent park_timeout state: {}", n), - }; - } - } - }; - - if self - .state - .compare_exchange(NOTIFIED, EMPTY, SeqCst, SeqCst) - .is_ok() - { - // got a notification - return UnparkReason::Unparked; - } - - // Spurious wakeup, go back to sleep. Alternatively, if we timed out, it will be caught - // in the branch above, when we discover the deadline is in the past - } - } - - pub(crate) fn unpark(&self) { - // To ensure the unparked thread will observe any writes we made before this call, we must - // perform a release operation that `park` can synchronize with. To do that we must write - // `NOTIFIED` even if `state` is already `NOTIFIED`. That is why this must be a swap rather - // than a compare-and-swap that returns if it reads `NOTIFIED` on failure. - match self.state.swap(NOTIFIED, SeqCst) { - EMPTY => return, // no one was waiting - NOTIFIED => return, // already unparked - PARKED => {} // gotta go wake someone up - _ => panic!("inconsistent state in unpark"), - } - - // There is a period between when the parked thread sets `state` to `PARKED` (or last - // checked `state` in the case of a spurious wakeup) and when it actually waits on `cvar`. - // If we were to notify during this period it would be ignored and then when the parked - // thread went to sleep it would never wake up. Fortunately, it has `lock` locked at this - // stage so we can acquire `lock` to wait until it is ready to receive the notification. - // - // Releasing `lock` before the call to `notify_one` means that when the parked thread wakes - // it doesn't get woken only to have to wait for us to release `lock`. 
- drop(self.lock.lock().unwrap()); - self.cvar.notify_one(); - } -} diff --git a/crossbeam-utils/src/sync/sharded_lock.rs b/crossbeam-utils/src/sync/sharded_lock.rs deleted file mode 100644 index a04884c72..000000000 --- a/crossbeam-utils/src/sync/sharded_lock.rs +++ /dev/null @@ -1,638 +0,0 @@ -use std::boxed::Box; -use std::cell::UnsafeCell; -use std::collections::HashMap; -use std::fmt; -use std::marker::PhantomData; -use std::mem; -use std::ops::{Deref, DerefMut}; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult}; -use std::sync::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use std::thread::{self, ThreadId}; -use std::vec::Vec; - -use crate::sync::once_lock::OnceLock; -use crate::CachePadded; - -/// The number of shards per sharded lock. Must be a power of two. -const NUM_SHARDS: usize = 8; - -/// A shard containing a single reader-writer lock. -struct Shard { - /// The inner reader-writer lock. - lock: RwLock<()>, - - /// The write-guard keeping this shard locked. - /// - /// Write operations will lock each shard and store the guard here. These guards get dropped at - /// the same time the big guard is dropped. - write_guard: UnsafeCell>>, -} - -/// A sharded reader-writer lock. -/// -/// This lock is equivalent to [`RwLock`], except read operations are faster and write operations -/// are slower. -/// -/// A `ShardedLock` is internally made of a list of *shards*, each being a [`RwLock`] occupying a -/// single cache line. Read operations will pick one of the shards depending on the current thread -/// and lock it. Write operations need to lock all shards in succession. -/// -/// By splitting the lock into shards, concurrent read operations will in most cases choose -/// different shards and thus update different cache lines, which is good for scalability. However, -/// write operations need to do more work and are therefore slower than usual. 
-/// -/// The priority policy of the lock is dependent on the underlying operating system's -/// implementation, and this type does not guarantee that any particular policy will be used. -/// -/// # Poisoning -/// -/// A `ShardedLock`, like [`RwLock`], will become poisoned on a panic. Note that it may only be -/// poisoned if a panic occurs while a write operation is in progress. If a panic occurs in any -/// read operation, the lock will not be poisoned. -/// -/// # Examples -/// -/// ``` -/// use crossbeam_utils::sync::ShardedLock; -/// -/// let lock = ShardedLock::new(5); -/// -/// // Any number of read locks can be held at once. -/// { -/// let r1 = lock.read().unwrap(); -/// let r2 = lock.read().unwrap(); -/// assert_eq!(*r1, 5); -/// assert_eq!(*r2, 5); -/// } // Read locks are dropped at this point. -/// -/// // However, only one write lock may be held. -/// { -/// let mut w = lock.write().unwrap(); -/// *w += 1; -/// assert_eq!(*w, 6); -/// } // Write lock is dropped here. -/// ``` -/// -/// [`RwLock`]: std::sync::RwLock -pub struct ShardedLock { - /// A list of locks protecting the internal data. - shards: Box<[CachePadded]>, - - /// The internal data. - value: UnsafeCell, -} - -unsafe impl Send for ShardedLock {} -unsafe impl Sync for ShardedLock {} - -impl UnwindSafe for ShardedLock {} -impl RefUnwindSafe for ShardedLock {} - -impl ShardedLock { - /// Creates a new sharded reader-writer lock. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// - /// let lock = ShardedLock::new(5); - /// ``` - pub fn new(value: T) -> Self { - Self { - shards: (0..NUM_SHARDS) - .map(|_| { - CachePadded::new(Shard { - lock: RwLock::new(()), - write_guard: UnsafeCell::new(None), - }) - }) - .collect::>(), - value: UnsafeCell::new(value), - } - } - - /// Consumes this lock, returning the underlying data. - /// - /// # Errors - /// - /// This method will return an error if the lock is poisoned. 
A lock gets poisoned when a write - /// operation panics. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// - /// let lock = ShardedLock::new(String::new()); - /// { - /// let mut s = lock.write().unwrap(); - /// *s = "modified".to_owned(); - /// } - /// assert_eq!(lock.into_inner().unwrap(), "modified"); - /// ``` - pub fn into_inner(self) -> LockResult { - let is_poisoned = self.is_poisoned(); - let inner = self.value.into_inner(); - - if is_poisoned { - Err(PoisonError::new(inner)) - } else { - Ok(inner) - } - } -} - -impl ShardedLock { - /// Returns `true` if the lock is poisoned. - /// - /// If another thread can still access the lock, it may become poisoned at any time. A `false` - /// result should not be trusted without additional synchronization. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// use std::sync::Arc; - /// use std::thread; - /// - /// let lock = Arc::new(ShardedLock::new(0)); - /// let c_lock = lock.clone(); - /// - /// let _: Result<(), _> = thread::spawn(move || { - /// let _lock = c_lock.write().unwrap(); - /// panic!(); // the lock gets poisoned - /// }).join(); - /// assert_eq!(lock.is_poisoned(), true); - /// ``` - pub fn is_poisoned(&self) -> bool { - self.shards[0].lock.is_poisoned() - } - - /// Returns a mutable reference to the underlying data. - /// - /// Since this call borrows the lock mutably, no actual locking needs to take place. - /// - /// # Errors - /// - /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write - /// operation panics. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// - /// let mut lock = ShardedLock::new(0); - /// *lock.get_mut().unwrap() = 10; - /// assert_eq!(*lock.read().unwrap(), 10); - /// ``` - pub fn get_mut(&mut self) -> LockResult<&mut T> { - let is_poisoned = self.is_poisoned(); - let inner = unsafe { &mut *self.value.get() }; - - if is_poisoned { - Err(PoisonError::new(inner)) - } else { - Ok(inner) - } - } - - /// Attempts to acquire this lock with shared read access. - /// - /// If the access could not be granted at this time, an error is returned. Otherwise, a guard - /// is returned which will release the shared access when it is dropped. This method does not - /// provide any guarantees with respect to the ordering of whether contentious readers or - /// writers will acquire the lock first. - /// - /// # Errors - /// - /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write - /// operation panics. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// - /// let lock = ShardedLock::new(1); - /// - /// match lock.try_read() { - /// Ok(n) => assert_eq!(*n, 1), - /// Err(_) => unreachable!(), - /// }; - /// ``` - pub fn try_read(&self) -> TryLockResult> { - // Take the current thread index and map it to a shard index. Thread indices will tend to - // distribute shards among threads equally, thus reducing contention due to read-locking. 
- let current_index = current_index().unwrap_or(0); - let shard_index = current_index & (self.shards.len() - 1); - - match self.shards[shard_index].lock.try_read() { - Ok(guard) => Ok(ShardedLockReadGuard { - lock: self, - _guard: guard, - _marker: PhantomData, - }), - Err(TryLockError::Poisoned(err)) => { - let guard = ShardedLockReadGuard { - lock: self, - _guard: err.into_inner(), - _marker: PhantomData, - }; - Err(TryLockError::Poisoned(PoisonError::new(guard))) - } - Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock), - } - } - - /// Locks with shared read access, blocking the current thread until it can be acquired. - /// - /// The calling thread will be blocked until there are no more writers which hold the lock. - /// There may be other readers currently inside the lock when this method returns. This method - /// does not provide any guarantees with respect to the ordering of whether contentious readers - /// or writers will acquire the lock first. - /// - /// Returns a guard which will release the shared access when dropped. - /// - /// # Errors - /// - /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write - /// operation panics. - /// - /// # Panics - /// - /// This method might panic when called if the lock is already held by the current thread. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// use std::sync::Arc; - /// use std::thread; - /// - /// let lock = Arc::new(ShardedLock::new(1)); - /// let c_lock = lock.clone(); - /// - /// let n = lock.read().unwrap(); - /// assert_eq!(*n, 1); - /// - /// thread::spawn(move || { - /// let r = c_lock.read(); - /// assert!(r.is_ok()); - /// }).join().unwrap(); - /// ``` - pub fn read(&self) -> LockResult> { - // Take the current thread index and map it to a shard index. Thread indices will tend to - // distribute shards among threads equally, thus reducing contention due to read-locking. 
- let current_index = current_index().unwrap_or(0); - let shard_index = current_index & (self.shards.len() - 1); - - match self.shards[shard_index].lock.read() { - Ok(guard) => Ok(ShardedLockReadGuard { - lock: self, - _guard: guard, - _marker: PhantomData, - }), - Err(err) => Err(PoisonError::new(ShardedLockReadGuard { - lock: self, - _guard: err.into_inner(), - _marker: PhantomData, - })), - } - } - - /// Attempts to acquire this lock with exclusive write access. - /// - /// If the access could not be granted at this time, an error is returned. Otherwise, a guard - /// is returned which will release the exclusive access when it is dropped. This method does - /// not provide any guarantees with respect to the ordering of whether contentious readers or - /// writers will acquire the lock first. - /// - /// # Errors - /// - /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write - /// operation panics. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// - /// let lock = ShardedLock::new(1); - /// - /// let n = lock.read().unwrap(); - /// assert_eq!(*n, 1); - /// - /// assert!(lock.try_write().is_err()); - /// ``` - pub fn try_write(&self) -> TryLockResult> { - let mut poisoned = false; - let mut blocked = None; - - // Write-lock each shard in succession. - for (i, shard) in self.shards.iter().enumerate() { - let guard = match shard.lock.try_write() { - Ok(guard) => guard, - Err(TryLockError::Poisoned(err)) => { - poisoned = true; - err.into_inner() - } - Err(TryLockError::WouldBlock) => { - blocked = Some(i); - break; - } - }; - - // Store the guard into the shard. - unsafe { - let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard); - let dest: *mut _ = shard.write_guard.get(); - *dest = Some(guard); - } - } - - if let Some(i) = blocked { - // Unlock the shards in reverse order of locking. 
- for shard in self.shards[0..i].iter().rev() { - unsafe { - let dest: *mut _ = shard.write_guard.get(); - let guard = (*dest).take(); - drop(guard); - } - } - Err(TryLockError::WouldBlock) - } else if poisoned { - let guard = ShardedLockWriteGuard { - lock: self, - _marker: PhantomData, - }; - Err(TryLockError::Poisoned(PoisonError::new(guard))) - } else { - Ok(ShardedLockWriteGuard { - lock: self, - _marker: PhantomData, - }) - } - } - - /// Locks with exclusive write access, blocking the current thread until it can be acquired. - /// - /// The calling thread will be blocked until there are no more writers which hold the lock. - /// There may be other readers currently inside the lock when this method returns. This method - /// does not provide any guarantees with respect to the ordering of whether contentious readers - /// or writers will acquire the lock first. - /// - /// Returns a guard which will release the exclusive access when dropped. - /// - /// # Errors - /// - /// This method will return an error if the lock is poisoned. A lock gets poisoned when a write - /// operation panics. - /// - /// # Panics - /// - /// This method might panic when called if the lock is already held by the current thread. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::ShardedLock; - /// - /// let lock = ShardedLock::new(1); - /// - /// let mut n = lock.write().unwrap(); - /// *n = 2; - /// - /// assert!(lock.try_read().is_err()); - /// ``` - pub fn write(&self) -> LockResult> { - let mut poisoned = false; - - // Write-lock each shard in succession. - for shard in self.shards.iter() { - let guard = match shard.lock.write() { - Ok(guard) => guard, - Err(err) => { - poisoned = true; - err.into_inner() - } - }; - - // Store the guard into the shard. 
- unsafe { - let guard: RwLockWriteGuard<'_, ()> = guard; - let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard); - let dest: *mut _ = shard.write_guard.get(); - *dest = Some(guard); - } - } - - if poisoned { - Err(PoisonError::new(ShardedLockWriteGuard { - lock: self, - _marker: PhantomData, - })) - } else { - Ok(ShardedLockWriteGuard { - lock: self, - _marker: PhantomData, - }) - } - } -} - -impl fmt::Debug for ShardedLock { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.try_read() { - Ok(guard) => f - .debug_struct("ShardedLock") - .field("data", &&*guard) - .finish(), - Err(TryLockError::Poisoned(err)) => f - .debug_struct("ShardedLock") - .field("data", &&**err.get_ref()) - .finish(), - Err(TryLockError::WouldBlock) => { - struct LockedPlaceholder; - impl fmt::Debug for LockedPlaceholder { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("") - } - } - f.debug_struct("ShardedLock") - .field("data", &LockedPlaceholder) - .finish() - } - } - } -} - -impl Default for ShardedLock { - fn default() -> Self { - Self::new(Default::default()) - } -} - -impl From for ShardedLock { - fn from(t: T) -> Self { - Self::new(t) - } -} - -/// A guard used to release the shared read access of a [`ShardedLock`] when dropped. 
-#[clippy::has_significant_drop] -pub struct ShardedLockReadGuard<'a, T: ?Sized> { - lock: &'a ShardedLock, - _guard: RwLockReadGuard<'a, ()>, - _marker: PhantomData>, -} - -unsafe impl Sync for ShardedLockReadGuard<'_, T> {} - -impl Deref for ShardedLockReadGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.lock.value.get() } - } -} - -impl fmt::Debug for ShardedLockReadGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ShardedLockReadGuard") - .field("lock", &self.lock) - .finish() - } -} - -impl fmt::Display for ShardedLockReadGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -/// A guard used to release the exclusive write access of a [`ShardedLock`] when dropped. -#[clippy::has_significant_drop] -pub struct ShardedLockWriteGuard<'a, T: ?Sized> { - lock: &'a ShardedLock, - _marker: PhantomData>, -} - -unsafe impl Sync for ShardedLockWriteGuard<'_, T> {} - -impl Drop for ShardedLockWriteGuard<'_, T> { - fn drop(&mut self) { - // Unlock the shards in reverse order of locking. - for shard in self.lock.shards.iter().rev() { - unsafe { - let dest: *mut _ = shard.write_guard.get(); - let guard = (*dest).take(); - drop(guard); - } - } - } -} - -impl fmt::Debug for ShardedLockWriteGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ShardedLockWriteGuard") - .field("lock", &self.lock) - .finish() - } -} - -impl fmt::Display for ShardedLockWriteGuard<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - (**self).fmt(f) - } -} - -impl Deref for ShardedLockWriteGuard<'_, T> { - type Target = T; - - fn deref(&self) -> &T { - unsafe { &*self.lock.value.get() } - } -} - -impl DerefMut for ShardedLockWriteGuard<'_, T> { - fn deref_mut(&mut self) -> &mut T { - unsafe { &mut *self.lock.value.get() } - } -} - -/// Returns a `usize` that identifies the current thread. 
-/// -/// Each thread is associated with an 'index'. While there are no particular guarantees, indices -/// usually tend to be consecutive numbers between 0 and the number of running threads. -/// -/// Since this function accesses TLS, `None` might be returned if the current thread's TLS is -/// tearing down. -#[inline] -fn current_index() -> Option { - REGISTRATION.try_with(|reg| reg.index).ok() -} - -/// The global registry keeping track of registered threads and indices. -struct ThreadIndices { - /// Mapping from `ThreadId` to thread index. - mapping: HashMap, - - /// A list of free indices. - free_list: Vec, - - /// The next index to allocate if the free list is empty. - next_index: usize, -} - -fn thread_indices() -> &'static Mutex { - static THREAD_INDICES: OnceLock> = OnceLock::new(); - fn init() -> Mutex { - Mutex::new(ThreadIndices { - mapping: HashMap::new(), - free_list: Vec::new(), - next_index: 0, - }) - } - THREAD_INDICES.get_or_init(init) -} - -/// A registration of a thread with an index. -/// -/// When dropped, unregisters the thread and frees the reserved index. -struct Registration { - index: usize, - thread_id: ThreadId, -} - -impl Drop for Registration { - fn drop(&mut self) { - let mut indices = thread_indices().lock().unwrap(); - indices.mapping.remove(&self.thread_id); - indices.free_list.push(self.index); - } -} - -std::thread_local! 
{ - static REGISTRATION: Registration = { - let thread_id = thread::current().id(); - let mut indices = thread_indices().lock().unwrap(); - - let index = match indices.free_list.pop() { - Some(i) => i, - None => { - let i = indices.next_index; - indices.next_index += 1; - i - } - }; - indices.mapping.insert(thread_id, index); - - Registration { - index, - thread_id, - } - }; -} diff --git a/crossbeam-utils/src/sync/wait_group.rs b/crossbeam-utils/src/sync/wait_group.rs deleted file mode 100644 index 3c55fac63..000000000 --- a/crossbeam-utils/src/sync/wait_group.rs +++ /dev/null @@ -1,146 +0,0 @@ -use crate::primitive::sync::{Arc, Condvar, Mutex}; -use std::fmt; - -/// Enables threads to synchronize the beginning or end of some computation. -/// -/// # Wait groups vs barriers -/// -/// `WaitGroup` is very similar to [`Barrier`], but there are a few differences: -/// -/// * [`Barrier`] needs to know the number of threads at construction, while `WaitGroup` is cloned to -/// register more threads. -/// -/// * A [`Barrier`] can be reused even after all threads have synchronized, while a `WaitGroup` -/// synchronizes threads only once. -/// -/// * All threads wait for others to reach the [`Barrier`]. With `WaitGroup`, each thread can choose -/// to either wait for other threads or to continue without blocking. -/// -/// # Examples -/// -/// ``` -/// use crossbeam_utils::sync::WaitGroup; -/// use std::thread; -/// -/// // Create a new wait group. -/// let wg = WaitGroup::new(); -/// -/// for _ in 0..4 { -/// // Create another reference to the wait group. -/// let wg = wg.clone(); -/// -/// thread::spawn(move || { -/// // Do some work. -/// -/// // Drop the reference to the wait group. -/// drop(wg); -/// }); -/// } -/// -/// // Block until all threads have finished their work. 
-/// wg.wait(); -/// # if cfg!(miri) { std::thread::sleep(std::time::Duration::from_millis(500)); } // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 -/// ``` -/// -/// [`Barrier`]: std::sync::Barrier -pub struct WaitGroup { - inner: Arc, -} - -/// Inner state of a `WaitGroup`. -struct Inner { - cvar: Condvar, - count: Mutex, -} - -impl Default for WaitGroup { - fn default() -> Self { - Self { - inner: Arc::new(Inner { - cvar: Condvar::new(), - count: Mutex::new(1), - }), - } - } -} - -impl WaitGroup { - /// Creates a new wait group and returns the single reference to it. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::WaitGroup; - /// - /// let wg = WaitGroup::new(); - /// ``` - pub fn new() -> Self { - Self::default() - } - - /// Drops this reference and waits until all other references are dropped. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::sync::WaitGroup; - /// use std::thread; - /// - /// let wg = WaitGroup::new(); - /// - /// # let t = - /// thread::spawn({ - /// let wg = wg.clone(); - /// move || { - /// // Block until both threads have reached `wait()`. - /// wg.wait(); - /// } - /// }); - /// - /// // Block until both threads have reached `wait()`. 
- /// wg.wait(); - /// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 - /// ``` - pub fn wait(self) { - if *self.inner.count.lock().unwrap() == 1 { - return; - } - - let inner = self.inner.clone(); - drop(self); - - let mut count = inner.count.lock().unwrap(); - while *count > 0 { - count = inner.cvar.wait(count).unwrap(); - } - } -} - -impl Drop for WaitGroup { - fn drop(&mut self) { - let mut count = self.inner.count.lock().unwrap(); - *count -= 1; - - if *count == 0 { - self.inner.cvar.notify_all(); - } - } -} - -impl Clone for WaitGroup { - fn clone(&self) -> Self { - let mut count = self.inner.count.lock().unwrap(); - *count += 1; - - Self { - inner: self.inner.clone(), - } - } -} - -impl fmt::Debug for WaitGroup { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let count: &usize = &self.inner.count.lock().unwrap(); - f.debug_struct("WaitGroup").field("count", count).finish() - } -} diff --git a/crossbeam-utils/src/thread.rs b/crossbeam-utils/src/thread.rs deleted file mode 100644 index c1226a46c..000000000 --- a/crossbeam-utils/src/thread.rs +++ /dev/null @@ -1,633 +0,0 @@ -//! Threads that can borrow variables from the stack. -//! -//! Create a scope when spawned threads need to access variables on the stack: -//! -//! ``` -//! use crossbeam_utils::thread; -//! -//! let people = vec![ -//! "Alice".to_string(), -//! "Bob".to_string(), -//! "Carol".to_string(), -//! ]; -//! -//! thread::scope(|s| { -//! for person in &people { -//! s.spawn(move |_| { -//! println!("Hello, {}!", person); -//! }); -//! } -//! }).unwrap(); -//! ``` -//! -//! # Why scoped threads? -//! -//! Suppose we wanted to re-write the previous example using plain threads: -//! -//! ```compile_fail,E0597 -//! use std::thread; -//! -//! let people = vec![ -//! "Alice".to_string(), -//! "Bob".to_string(), -//! "Carol".to_string(), -//! ]; -//! -//! let mut threads = Vec::new(); -//! -//! for person in &people { -//! 
threads.push(thread::spawn(move || { -//! println!("Hello, {}!", person); -//! })); -//! } -//! -//! for thread in threads { -//! thread.join().unwrap(); -//! } -//! ``` -//! -//! This doesn't work because the borrow checker complains about `people` not living long enough: -//! -//! ```text -//! error[E0597]: `people` does not live long enough -//! --> src/main.rs:12:20 -//! | -//! 12 | for person in &people { -//! | ^^^^^^ borrowed value does not live long enough -//! ... -//! 21 | } -//! | - borrowed value only lives until here -//! | -//! = note: borrowed value must be valid for the static lifetime... -//! ``` -//! -//! The problem here is that spawned threads are not allowed to borrow variables on stack because -//! the compiler cannot prove they will be joined before `people` is destroyed. -//! -//! Scoped threads are a mechanism to guarantee to the compiler that spawned threads will be joined -//! before the scope ends. -//! -//! # How scoped threads work -//! -//! If a variable is borrowed by a thread, the thread must complete before the variable is -//! destroyed. Threads spawned using [`std::thread::spawn`] can only borrow variables with the -//! `'static` lifetime because the borrow checker cannot be sure when the thread will complete. -//! -//! A scope creates a clear boundary between variables outside the scope and threads inside the -//! scope. Whenever a scope spawns a thread, it promises to join the thread before the scope ends. -//! This way we guarantee to the borrow checker that scoped threads only live within the scope and -//! can safely access variables outside it. -//! -//! # Nesting scoped threads -//! -//! Sometimes scoped threads need to spawn more threads within the same scope. This is a little -//! tricky because argument `s` lives *inside* the invocation of `thread::scope()` and as such -//! cannot be borrowed by scoped threads: -//! -//! ```compile_fail,E0521 -//! use crossbeam_utils::thread; -//! -//! thread::scope(|s| { -//! 
s.spawn(|_| { -//! // Not going to compile because we're trying to borrow `s`, -//! // which lives *inside* the scope! :( -//! s.spawn(|_| println!("nested thread")); -//! }); -//! }); -//! ``` -//! -//! Fortunately, there is a solution. Every scoped thread is passed a reference to its scope as an -//! argument, which can be used for spawning nested threads: -//! -//! ``` -//! use crossbeam_utils::thread; -//! -//! thread::scope(|s| { -//! // Note the `|s|` here. -//! s.spawn(|s| { -//! // Yay, this works because we're using a fresh argument `s`! :) -//! s.spawn(|_| println!("nested thread")); -//! }); -//! }).unwrap(); -//! ``` - -use std::boxed::Box; -use std::fmt; -use std::io; -use std::marker::PhantomData; -use std::mem; -use std::panic; -use std::string::String; -use std::sync::{Arc, Mutex}; -use std::thread; -use std::vec::Vec; - -use crate::sync::WaitGroup; - -type SharedVec = Arc>>; -type SharedOption = Arc>>; - -/// Creates a new scope for spawning threads. -/// -/// All child threads that haven't been manually joined will be automatically joined just before -/// this function invocation ends. If all joined threads have successfully completed, `Ok` is -/// returned with the return value of `f`. If any of the joined threads has panicked, an `Err` is -/// returned containing errors from panicked threads. Note that if panics are implemented by -/// aborting the process, no error is returned; see the notes of [std::panic::catch_unwind]. -/// -/// **Note:** Since Rust 1.63, this function is soft-deprecated in favor of the more efficient [`std::thread::scope`]. 
-/// -/// # Examples -/// -/// ``` -/// use crossbeam_utils::thread; -/// -/// let var = vec![1, 2, 3]; -/// -/// thread::scope(|s| { -/// s.spawn(|_| { -/// println!("A child thread borrowing `var`: {:?}", var); -/// }); -/// }).unwrap(); -/// ``` -pub fn scope<'env, F, R>(f: F) -> thread::Result -where - F: FnOnce(&Scope<'env>) -> R, -{ - struct AbortOnPanic; - impl Drop for AbortOnPanic { - fn drop(&mut self) { - if thread::panicking() { - std::process::abort(); - } - } - } - - let wg = WaitGroup::new(); - let scope = Scope::<'env> { - handles: SharedVec::default(), - wait_group: wg.clone(), - _marker: PhantomData, - }; - - // Execute the scoped function, but catch any panics. - let result = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&scope))); - - // If an unwinding panic occurs before all threads are joined - // promote it to an aborting panic to prevent any threads from escaping the scope. - let guard = AbortOnPanic; - - // Wait until all nested scopes are dropped. - drop(scope.wait_group); - wg.wait(); - - // Join all remaining spawned threads. - let panics: Vec<_> = scope - .handles - .lock() - .unwrap() - // Filter handles that haven't been joined, join them, and collect errors. - .drain(..) - .filter_map(|handle| handle.lock().unwrap().take()) - .filter_map(|handle| handle.join().err()) - .collect(); - - mem::forget(guard); - - // If `f` has panicked, resume unwinding. - // If any of the child threads have panicked, return the panic errors. - // Otherwise, everything is OK and return the result of `f`. - match result { - Err(err) => panic::resume_unwind(err), - Ok(res) => { - if panics.is_empty() { - Ok(res) - } else { - Err(Box::new(panics)) - } - } - } -} - -/// A scope for spawning threads. -pub struct Scope<'env> { - /// The list of the thread join handles. - handles: SharedVec>>, - - /// Used to wait until all subscopes all dropped. - wait_group: WaitGroup, - - /// Borrows data with invariant lifetime `'env`. 
- _marker: PhantomData<&'env mut &'env ()>, -} - -unsafe impl Sync for Scope<'_> {} - -impl<'env> Scope<'env> { - /// Spawns a scoped thread. - /// - /// This method is similar to the [`spawn`] function in Rust's standard library. The difference - /// is that this thread is scoped, meaning it's guaranteed to terminate before the scope exits, - /// allowing it to reference variables outside the scope. - /// - /// The scoped thread is passed a reference to this scope as an argument, which can be used for - /// spawning nested threads. - /// - /// The returned [handle](ScopedJoinHandle) can be used to manually - /// [join](ScopedJoinHandle::join) the thread before the scope exits. - /// - /// This will create a thread using default parameters of [`ScopedThreadBuilder`], if you want to specify the - /// stack size or the name of the thread, use this API instead. - /// - /// [`spawn`]: std::thread::spawn - /// - /// # Panics - /// - /// Panics if the OS fails to create a thread; use [`ScopedThreadBuilder::spawn`] - /// to recover from such errors. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::thread; - /// - /// thread::scope(|s| { - /// let handle = s.spawn(|_| { - /// println!("A child thread is running"); - /// 42 - /// }); - /// - /// // Join the thread and retrieve its result. - /// let res = handle.join().unwrap(); - /// assert_eq!(res, 42); - /// }).unwrap(); - /// ``` - pub fn spawn<'scope, F, T>(&'scope self, f: F) -> ScopedJoinHandle<'scope, T> - where - F: FnOnce(&Scope<'env>) -> T, - F: Send + 'env, - T: Send + 'env, - { - self.builder() - .spawn(f) - .expect("failed to spawn scoped thread") - } - - /// Creates a builder that can configure a thread before spawning. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::thread; - /// - /// thread::scope(|s| { - /// s.builder() - /// .spawn(|_| println!("A child thread is running")) - /// .unwrap(); - /// }).unwrap(); - /// ``` - pub fn builder<'scope>(&'scope self) -> ScopedThreadBuilder<'scope, 'env> { - ScopedThreadBuilder { - scope: self, - builder: thread::Builder::new(), - } - } -} - -impl fmt::Debug for Scope<'_> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Scope { .. }") - } -} - -/// Configures the properties of a new thread. -/// -/// The two configurable properties are: -/// -/// - [`name`]: Specifies an [associated name for the thread][naming-threads]. -/// - [`stack_size`]: Specifies the [desired stack size for the thread][stack-size]. -/// -/// The [`spawn`] method will take ownership of the builder and return an [`io::Result`] of the -/// thread handle with the given configuration. -/// -/// The [`Scope::spawn`] method uses a builder with default configuration and unwraps its return -/// value. You may want to use this builder when you want to recover from a failure to launch a -/// thread. -/// -/// # Examples -/// -/// ``` -/// use crossbeam_utils::thread; -/// -/// thread::scope(|s| { -/// s.builder() -/// .spawn(|_| println!("Running a child thread")) -/// .unwrap(); -/// }).unwrap(); -/// ``` -/// -/// [`name`]: ScopedThreadBuilder::name -/// [`stack_size`]: ScopedThreadBuilder::stack_size -/// [`spawn`]: ScopedThreadBuilder::spawn -/// [`io::Result`]: std::io::Result -/// [naming-threads]: std::thread#naming-threads -/// [stack-size]: std::thread#stack-size -#[must_use = "must eventually spawn the thread"] -#[derive(Debug)] -pub struct ScopedThreadBuilder<'scope, 'env> { - scope: &'scope Scope<'env>, - builder: thread::Builder, -} - -impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> { - /// Sets the name for the new thread. - /// - /// The name must not contain null bytes (`\0`). 
- /// - /// For more information about named threads, see [here][naming-threads]. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::thread; - /// use std::thread::current; - /// - /// thread::scope(|s| { - /// s.builder() - /// .name("my thread".to_string()) - /// .spawn(|_| assert_eq!(current().name(), Some("my thread"))) - /// .unwrap(); - /// }).unwrap(); - /// ``` - /// - /// [naming-threads]: std::thread#naming-threads - pub fn name(mut self, name: String) -> ScopedThreadBuilder<'scope, 'env> { - self.builder = self.builder.name(name); - self - } - - /// Sets the size of the stack for the new thread. - /// - /// The stack size is measured in bytes. - /// - /// For more information about the stack size for threads, see [here][stack-size]. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::thread; - /// - /// thread::scope(|s| { - /// s.builder() - /// .stack_size(32 * 1024) - /// .spawn(|_| println!("Running a child thread")) - /// .unwrap(); - /// }).unwrap(); - /// ``` - /// - /// [stack-size]: std::thread#stack-size - pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'scope, 'env> { - self.builder = self.builder.stack_size(size); - self - } - - /// Spawns a scoped thread with this configuration. - /// - /// The scoped thread is passed a reference to this scope as an argument, which can be used for - /// spawning nested threads. - /// - /// The returned handle can be used to manually join the thread before the scope exits. - /// - /// # Errors - /// - /// Unlike the [`Scope::spawn`] method, this method yields an - /// [`io::Result`] to capture any failure to create the thread at - /// the OS level. - /// - /// [`io::Result`]: std::io::Result - /// - /// # Panics - /// - /// Panics if a thread name was set and it contained null bytes. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::thread; - /// - /// thread::scope(|s| { - /// let handle = s.builder() - /// .spawn(|_| { - /// println!("A child thread is running"); - /// 42 - /// }) - /// .unwrap(); - /// - /// // Join the thread and retrieve its result. - /// let res = handle.join().unwrap(); - /// assert_eq!(res, 42); - /// }).unwrap(); - /// ``` - pub fn spawn(self, f: F) -> io::Result> - where - F: FnOnce(&Scope<'env>) -> T, - F: Send + 'env, - T: Send + 'env, - { - // The result of `f` will be stored here. - let result = SharedOption::default(); - - // Spawn the thread and grab its join handle and thread handle. - let (handle, thread) = { - let result = Arc::clone(&result); - - // A clone of the scope that will be moved into the new thread. - let scope = Scope::<'env> { - handles: Arc::clone(&self.scope.handles), - wait_group: self.scope.wait_group.clone(), - _marker: PhantomData, - }; - - // Spawn the thread. - let handle = { - let closure = move || { - // Make sure the scope is inside the closure with the proper `'env` lifetime. - let scope: Scope<'env> = scope; - - // Run the closure. - let res = f(&scope); - - // Store the result if the closure didn't panic. - *result.lock().unwrap() = Some(res); - }; - - // Allocate `closure` on the heap and erase the `'env` bound. - let closure: Box = Box::new(closure); - let closure: Box = - unsafe { mem::transmute(closure) }; - - // Finally, spawn the closure. - self.builder.spawn(closure)? - }; - - let thread = handle.thread().clone(); - let handle = Arc::new(Mutex::new(Some(handle))); - (handle, thread) - }; - - // Add the handle to the shared list of join handles. - self.scope.handles.lock().unwrap().push(Arc::clone(&handle)); - - Ok(ScopedJoinHandle { - handle, - result, - thread, - _marker: PhantomData, - }) - } -} - -unsafe impl Send for ScopedJoinHandle<'_, T> {} -unsafe impl Sync for ScopedJoinHandle<'_, T> {} - -/// A handle that can be used to join its scoped thread. 
-/// -/// This struct is created by the [`Scope::spawn`] method and the -/// [`ScopedThreadBuilder::spawn`] method. -pub struct ScopedJoinHandle<'scope, T> { - /// A join handle to the spawned thread. - handle: SharedOption>, - - /// Holds the result of the inner closure. - result: SharedOption, - - /// A handle to the spawned thread. - thread: thread::Thread, - - /// Borrows the parent scope with lifetime `'scope`. - _marker: PhantomData<&'scope ()>, -} - -impl ScopedJoinHandle<'_, T> { - /// Waits for the thread to finish and returns its result. - /// - /// If the child thread panics, an error is returned. Note that if panics are implemented by - /// aborting the process, no error is returned; see the notes of [std::panic::catch_unwind]. - /// - /// # Panics - /// - /// This function may panic on some platforms if a thread attempts to join itself or otherwise - /// may create a deadlock with joining threads. - /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::thread; - /// - /// thread::scope(|s| { - /// let handle1 = s.spawn(|_| println!("I'm a happy thread :)")); - /// let handle2 = s.spawn(|_| panic!("I'm a sad thread :(")); - /// - /// // Join the first thread and verify that it succeeded. - /// let res = handle1.join(); - /// assert!(res.is_ok()); - /// - /// // Join the second thread and verify that it panicked. - /// let res = handle2.join(); - /// assert!(res.is_err()); - /// }).unwrap(); - /// ``` - pub fn join(self) -> thread::Result { - // Take out the handle. The handle will surely be available because the root scope waits - // for nested scopes before joining remaining threads. - let handle = self.handle.lock().unwrap().take().unwrap(); - - // Join the thread and then take the result out of its inner closure. - handle - .join() - .map(|()| self.result.lock().unwrap().take().unwrap()) - } - - /// Returns a handle to the underlying thread. 
- /// - /// # Examples - /// - /// ``` - /// use crossbeam_utils::thread; - /// - /// thread::scope(|s| { - /// let handle = s.spawn(|_| println!("A child thread is running")); - /// println!("The child thread ID: {:?}", handle.thread().id()); - /// }).unwrap(); - /// ``` - pub fn thread(&self) -> &thread::Thread { - &self.thread - } -} - -/// Unix-specific extensions. -#[cfg(unix)] -pub mod unix { - use super::ScopedJoinHandle; - use std::os::unix::thread::JoinHandleExt as _; - - #[doc(no_inline)] - pub use std::os::unix::thread::RawPthread; - - mod sealed { - pub trait Sealed {} - } - - /// Unix-specific extensions to [`ScopedJoinHandle`]. - pub trait JoinHandleExt: sealed::Sealed { - /// Extracts the raw pthread_t without taking ownership - fn as_pthread_t(&self) -> RawPthread; - - /// Consumes the thread, returning the raw pthread_t - /// - /// This function **transfers ownership** of the underlying pthread_t to - /// the caller. Callers are then the unique owners of the pthread_t and - /// must either detach or join the pthread_t once it's no longer needed. - fn into_pthread_t(self) -> RawPthread; - } - - impl sealed::Sealed for ScopedJoinHandle<'_, T> {} - impl JoinHandleExt for ScopedJoinHandle<'_, T> { - fn as_pthread_t(&self) -> RawPthread { - // Borrow the handle. The handle will surely be available because the root scope waits - // for nested scopes before joining remaining threads. - let handle = self.handle.lock().unwrap(); - handle.as_ref().unwrap().as_pthread_t() - } - fn into_pthread_t(self) -> RawPthread { - self.as_pthread_t() - } - } -} -/// Windows-specific extensions. -#[cfg(windows)] -mod windows { - use super::ScopedJoinHandle; - use std::os::windows::io::{AsRawHandle, IntoRawHandle, RawHandle}; - - impl AsRawHandle for ScopedJoinHandle<'_, T> { - fn as_raw_handle(&self) -> RawHandle { - // Borrow the handle. The handle will surely be available because the root scope waits - // for nested scopes before joining remaining threads. 
- let handle = self.handle.lock().unwrap(); - handle.as_ref().unwrap().as_raw_handle() - } - } - - impl IntoRawHandle for ScopedJoinHandle<'_, T> { - fn into_raw_handle(self) -> RawHandle { - self.as_raw_handle() - } - } -} - -impl fmt::Debug for ScopedJoinHandle<'_, T> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("ScopedJoinHandle { .. }") - } -} diff --git a/crossbeam-utils/tests/atomic_cell.rs b/crossbeam-utils/tests/atomic_cell.rs deleted file mode 100644 index 00ea102cb..000000000 --- a/crossbeam-utils/tests/atomic_cell.rs +++ /dev/null @@ -1,395 +0,0 @@ -use std::mem; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::SeqCst; - -use crossbeam_utils::atomic::AtomicCell; - -// Always use fallback for now on environments that do not support inline assembly. -fn always_use_fallback() -> bool { - atomic_maybe_uninit::cfg_has_atomic_cas! { - cfg!(any( - miri, - crossbeam_loom, - crossbeam_atomic_cell_force_fallback, - )) - } - atomic_maybe_uninit::cfg_no_atomic_cas! 
{ true } -} - -#[test] -fn is_lock_free() { - let always_use_fallback = always_use_fallback(); - - struct UsizeWrap(#[allow(dead_code)] usize); - struct U8Wrap(#[allow(dead_code)] bool); - struct I16Wrap(#[allow(dead_code)] i16); - #[repr(align(8))] - struct U64Align8(#[allow(dead_code)] u64); - - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!( - AtomicCell::::is_lock_free(), - !always_use_fallback - ); - - assert!(AtomicCell::<()>::is_lock_free()); - - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - assert_eq!(AtomicCell::::is_lock_free(), !always_use_fallback); - - // Sizes of both types must be equal, and the alignment of `u64` must be greater or equal than - // that of `AtomicU64`. In i686-unknown-linux-gnu, the alignment of `u64` is `4` and alignment - // of `AtomicU64` is `8`, so `AtomicCell` is not lock-free. 
- assert_eq!( - AtomicCell::::is_lock_free(), - cfg!(target_has_atomic = "64") && std::mem::align_of::() == 8 && !always_use_fallback - ); - assert_eq!(mem::size_of::(), 8); - assert_eq!(mem::align_of::(), 8); - assert_eq!( - AtomicCell::::is_lock_free(), - cfg!(target_has_atomic = "64") && !always_use_fallback - ); - - assert_eq!( - AtomicCell::::is_lock_free(), - cfg!(target_has_atomic = "128") - && std::mem::align_of::() == 16 - && !always_use_fallback - ); -} - -#[test] -fn const_is_lock_free() { - const _U: bool = AtomicCell::::is_lock_free(); - const _I: bool = AtomicCell::::is_lock_free(); -} - -#[test] -fn drops_unit() { - static CNT: AtomicUsize = AtomicUsize::new(0); - CNT.store(0, SeqCst); - - #[derive(Debug, PartialEq, Eq)] - struct Foo(); - - impl Foo { - fn new() -> Self { - CNT.fetch_add(1, SeqCst); - Self() - } - } - - impl Drop for Foo { - fn drop(&mut self) { - CNT.fetch_sub(1, SeqCst); - } - } - - impl Default for Foo { - fn default() -> Self { - Self::new() - } - } - - let a = AtomicCell::new(Foo::new()); - - assert_eq!(a.swap(Foo::new()), Foo::new()); - assert_eq!(CNT.load(SeqCst), 1); - - a.store(Foo::new()); - assert_eq!(CNT.load(SeqCst), 1); - - assert_eq!(a.swap(Foo::default()), Foo::new()); - assert_eq!(CNT.load(SeqCst), 1); - - drop(a); - assert_eq!(CNT.load(SeqCst), 0); -} - -#[test] -fn drops_u8() { - static CNT: AtomicUsize = AtomicUsize::new(0); - CNT.store(0, SeqCst); - - #[derive(Debug, PartialEq, Eq)] - struct Foo(u8); - - impl Foo { - fn new(val: u8) -> Self { - CNT.fetch_add(1, SeqCst); - Self(val) - } - } - - impl Drop for Foo { - fn drop(&mut self) { - CNT.fetch_sub(1, SeqCst); - } - } - - impl Default for Foo { - fn default() -> Self { - Self::new(0) - } - } - - let a = AtomicCell::new(Foo::new(5)); - - assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); - assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); - assert_eq!(CNT.load(SeqCst), 1); - - a.store(Foo::new(2)); - assert_eq!(CNT.load(SeqCst), 1); - - 
assert_eq!(a.swap(Foo::default()), Foo::new(2)); - assert_eq!(CNT.load(SeqCst), 1); - - assert_eq!(a.swap(Foo::default()), Foo::new(0)); - assert_eq!(CNT.load(SeqCst), 1); - - drop(a); - assert_eq!(CNT.load(SeqCst), 0); -} - -#[test] -fn drops_usize() { - static CNT: AtomicUsize = AtomicUsize::new(0); - CNT.store(0, SeqCst); - - #[derive(Debug, PartialEq, Eq)] - struct Foo(usize); - - impl Foo { - fn new(val: usize) -> Self { - CNT.fetch_add(1, SeqCst); - Self(val) - } - } - - impl Drop for Foo { - fn drop(&mut self) { - CNT.fetch_sub(1, SeqCst); - } - } - - impl Default for Foo { - fn default() -> Self { - Self::new(0) - } - } - - let a = AtomicCell::new(Foo::new(5)); - - assert_eq!(a.swap(Foo::new(6)), Foo::new(5)); - assert_eq!(a.swap(Foo::new(1)), Foo::new(6)); - assert_eq!(CNT.load(SeqCst), 1); - - a.store(Foo::new(2)); - assert_eq!(CNT.load(SeqCst), 1); - - assert_eq!(a.swap(Foo::default()), Foo::new(2)); - assert_eq!(CNT.load(SeqCst), 1); - - assert_eq!(a.swap(Foo::default()), Foo::new(0)); - assert_eq!(CNT.load(SeqCst), 1); - - drop(a); - assert_eq!(CNT.load(SeqCst), 0); -} - -#[test] -fn modular_u8() { - #[derive(Clone, Copy, Eq, Debug, Default)] - struct Foo(u8); - - impl PartialEq for Foo { - fn eq(&self, other: &Self) -> bool { - self.0 % 5 == other.0 % 5 - } - } - - let a = AtomicCell::new(Foo(1)); - - assert_eq!(a.load(), Foo(1)); - assert_eq!(a.swap(Foo(2)), Foo(11)); - assert_eq!(a.load(), Foo(52)); - - a.store(Foo(0)); - assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); - assert_eq!(a.load().0, 5); - assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); - assert_eq!(a.load().0, 15); -} - -#[test] -fn modular_usize() { - #[derive(Clone, Copy, Eq, Debug, Default)] - struct Foo(usize); - - impl PartialEq for Foo { - fn eq(&self, other: &Self) -> bool { - self.0 % 5 == other.0 % 5 - } - } - - let a = AtomicCell::new(Foo(1)); - - assert_eq!(a.load(), Foo(1)); - assert_eq!(a.swap(Foo(2)), Foo(11)); - assert_eq!(a.load(), Foo(52)); - 
- a.store(Foo(0)); - assert_eq!(a.compare_exchange(Foo(0), Foo(5)), Ok(Foo(100))); - assert_eq!(a.load().0, 5); - assert_eq!(a.compare_exchange(Foo(10), Foo(15)), Ok(Foo(100))); - assert_eq!(a.load().0, 15); -} - -#[test] -fn garbage_padding() { - #[derive(Copy, Clone, Eq, PartialEq)] - struct Object { - a: i64, - b: i32, - } - - let cell = AtomicCell::new(Object { a: 0, b: 0 }); - let _garbage = [0xfe, 0xfe, 0xfe, 0xfe, 0xfe]; // Needed - let next = Object { a: 0, b: 0 }; - - let prev = cell.load(); - assert!(cell.compare_exchange(prev, next).is_ok()); - println!(); -} - -#[test] -fn const_atomic_cell_new() { - static CELL: AtomicCell = AtomicCell::new(0); - - CELL.store(1); - assert_eq!(CELL.load(), 1); -} - -// https://github.com/crossbeam-rs/crossbeam/pull/767 -macro_rules! test_arithmetic { - ($test_name:ident, $ty:ident) => { - #[test] - fn $test_name() { - let a: AtomicCell<$ty> = AtomicCell::new(7); - - assert_eq!(a.fetch_add(3), 7); - assert_eq!(a.load(), 10); - - assert_eq!(a.fetch_sub(3), 10); - assert_eq!(a.load(), 7); - - assert_eq!(a.fetch_and(3), 7); - assert_eq!(a.load(), 3); - - assert_eq!(a.fetch_or(16), 3); - assert_eq!(a.load(), 19); - - assert_eq!(a.fetch_xor(2), 19); - assert_eq!(a.load(), 17); - - assert_eq!(a.fetch_max(18), 17); - assert_eq!(a.load(), 18); - - assert_eq!(a.fetch_min(17), 18); - assert_eq!(a.load(), 17); - - assert_eq!(a.fetch_nand(7), 17); - assert_eq!(a.load(), !(17 & 7)); - } - }; -} -test_arithmetic!(arithmetic_u8, u8); -test_arithmetic!(arithmetic_i8, i8); -test_arithmetic!(arithmetic_u16, u16); -test_arithmetic!(arithmetic_i16, i16); -test_arithmetic!(arithmetic_u32, u32); -test_arithmetic!(arithmetic_i32, i32); -test_arithmetic!(arithmetic_u64, u64); -test_arithmetic!(arithmetic_i64, i64); -test_arithmetic!(arithmetic_u128, u128); -test_arithmetic!(arithmetic_i128, i128); - -// https://github.com/crossbeam-rs/crossbeam/issues/748 -#[cfg_attr(miri, ignore)] // TODO -#[test] -fn issue_748() { - #[allow(dead_code)] - 
#[repr(align(8))] - #[derive(Debug, Clone, Copy, PartialEq, Eq)] - enum Test { - Field(u32), - FieldLess, - } - - assert_eq!(mem::size_of::(), 8); - assert_eq!( - AtomicCell::::is_lock_free(), - cfg!(target_has_atomic = "64") && !always_use_fallback() - ); - let x = AtomicCell::new(Test::FieldLess); - assert_eq!(x.load(), Test::FieldLess); -} - -// https://github.com/crossbeam-rs/crossbeam/issues/833 -#[test] -fn issue_833() { - use std::num::NonZeroU128; - use std::sync::atomic::{AtomicBool, Ordering}; - use std::thread; - - #[cfg(miri)] - const N: usize = 10_000; - #[cfg(not(miri))] - const N: usize = 1_000_000; - - #[allow(dead_code)] - enum Enum { - NeverConstructed, - Cell(AtomicCell), - } - - static STATIC: Enum = Enum::Cell(AtomicCell::new(match NonZeroU128::new(1) { - Some(nonzero) => nonzero, - None => unreachable!(), - })); - static FINISHED: AtomicBool = AtomicBool::new(false); - - let handle = thread::spawn(|| { - let cell = match &STATIC { - Enum::NeverConstructed => unreachable!(), - Enum::Cell(cell) => cell, - }; - let x = NonZeroU128::new(0xFFFF_FFFF_FFFF_FFFF_0000_0000_0000_0000).unwrap(); - let y = NonZeroU128::new(0x0000_0000_0000_0000_FFFF_FFFF_FFFF_FFFF).unwrap(); - while !FINISHED.load(Ordering::Relaxed) { - cell.store(x); - cell.store(y); - } - }); - - for _ in 0..N { - if let Enum::NeverConstructed = STATIC { - unreachable!(":("); - } - } - - FINISHED.store(true, Ordering::Relaxed); - handle.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371 -} diff --git a/crossbeam-utils/tests/cache_padded.rs b/crossbeam-utils/tests/cache_padded.rs deleted file mode 100644 index 7e5df215c..000000000 --- a/crossbeam-utils/tests/cache_padded.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::cell::Cell; -use std::mem; - -use crossbeam_utils::CachePadded; - -#[test] -fn default() { - let x: CachePadded = Default::default(); - assert_eq!(*x, 0); -} - -#[test] -fn store_u64() { - let x: CachePadded = CachePadded::new(17); - 
assert_eq!(*x, 17); -} - -#[test] -fn store_pair() { - let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37)); - assert_eq!(x.0, 17); - assert_eq!(x.1, 37); -} - -#[test] -fn distance() { - let arr = [CachePadded::new(17u8), CachePadded::new(37u8)]; - let a = &*arr[0] as *const u8; - let b = &*arr[1] as *const u8; - let align = mem::align_of::>(); - assert!(align >= 32); - assert_eq!(unsafe { a.add(align) }, b); -} - -#[test] -fn different_sizes() { - CachePadded::new(17u8); - CachePadded::new(17u16); - CachePadded::new(17u32); - CachePadded::new([17u64; 0]); - CachePadded::new([17u64; 1]); - CachePadded::new([17u64; 2]); - CachePadded::new([17u64; 3]); - CachePadded::new([17u64; 4]); - CachePadded::new([17u64; 5]); - CachePadded::new([17u64; 6]); - CachePadded::new([17u64; 7]); - CachePadded::new([17u64; 8]); -} - -#[test] -fn large() { - let a = [17u64; 9]; - let b = CachePadded::new(a); - assert!(mem::size_of_val(&a) <= mem::size_of_val(&b)); -} - -#[test] -fn debug() { - assert_eq!( - format!("{:?}", CachePadded::new(17u64)), - "CachePadded { value: 17 }" - ); -} - -#[test] -fn drops() { - let count = Cell::new(0); - - struct Foo<'a>(&'a Cell); - - impl Drop for Foo<'_> { - fn drop(&mut self) { - self.0.set(self.0.get() + 1); - } - } - - let a = CachePadded::new(Foo(&count)); - let b = CachePadded::new(Foo(&count)); - - assert_eq!(count.get(), 0); - drop(a); - assert_eq!(count.get(), 1); - drop(b); - assert_eq!(count.get(), 2); -} - -#[allow(clippy::clone_on_copy)] // This is intentional. 
-#[test] -fn clone() { - let a = CachePadded::new(17); - let b = a.clone(); - assert_eq!(*a, *b); -} - -#[test] -fn runs_custom_clone() { - let count = Cell::new(0); - - struct Foo<'a>(&'a Cell); - - impl Clone for Foo<'_> { - fn clone(&self) -> Self { - self.0.set(self.0.get() + 1); - Self(self.0) - } - } - - let a = CachePadded::new(Foo(&count)); - let _ = a.clone(); - - assert_eq!(count.get(), 1); -} diff --git a/crossbeam-utils/tests/parker.rs b/crossbeam-utils/tests/parker.rs deleted file mode 100644 index da8b133c3..000000000 --- a/crossbeam-utils/tests/parker.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::thread::sleep; -use std::time::Duration; - -use crossbeam_utils::sync::{Parker, UnparkReason}; -use crossbeam_utils::thread; - -#[test] -fn park_timeout_unpark_before() { - let p = Parker::new(); - for _ in 0..10 { - p.unparker().unpark(); - assert_eq!( - p.park_timeout(Duration::from_millis(u32::MAX as u64)), - UnparkReason::Unparked, - ); - } -} - -#[test] -fn park_timeout_unpark_not_called() { - let p = Parker::new(); - for _ in 0..10 { - assert_eq!( - p.park_timeout(Duration::from_millis(10)), - UnparkReason::Timeout, - ); - } -} - -#[test] -fn park_timeout_unpark_called_other_thread() { - for _ in 0..10 { - let p = Parker::new(); - let u = p.unparker().clone(); - - thread::scope(|scope| { - scope.spawn(move |_| { - sleep(Duration::from_millis(50)); - u.unpark(); - }); - - assert_eq!( - p.park_timeout(Duration::from_millis(u32::MAX as u64)), - UnparkReason::Unparked, - ); - }) - .unwrap(); - } -} diff --git a/crossbeam-utils/tests/sharded_lock.rs b/crossbeam-utils/tests/sharded_lock.rs deleted file mode 100644 index 488a7be8f..000000000 --- a/crossbeam-utils/tests/sharded_lock.rs +++ /dev/null @@ -1,252 +0,0 @@ -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::mpsc::channel; -use std::sync::{Arc, TryLockError}; -use std::thread; - -use crossbeam_utils::sync::ShardedLock; -use rand::Rng; - -#[derive(Eq, PartialEq, Debug)] -struct 
NonCopy(i32); - -#[test] -fn smoke() { - let l = ShardedLock::new(()); - drop(l.read().unwrap()); - drop(l.write().unwrap()); - drop((l.read().unwrap(), l.read().unwrap())); - drop(l.write().unwrap()); -} - -#[test] -fn frob() { - const N: u32 = 10; - #[cfg(miri)] - const M: usize = 50; - #[cfg(not(miri))] - const M: usize = 1000; - - let r = Arc::new(ShardedLock::new(())); - - let (tx, rx) = channel::<()>(); - for _ in 0..N { - let tx = tx.clone(); - let r = r.clone(); - thread::spawn(move || { - let mut rng = rand::thread_rng(); - for _ in 0..M { - if rng.gen_bool(1.0 / (N as f64)) { - drop(r.write().unwrap()); - } else { - drop(r.read().unwrap()); - } - } - drop(tx); - }); - } - drop(tx); - let _ = rx.recv(); -} - -#[test] -fn arc_poison_wr() { - let arc = Arc::new(ShardedLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.write().unwrap(); - panic!(); - }) - .join(); - assert!(arc.read().is_err()); -} - -#[test] -fn arc_poison_ww() { - let arc = Arc::new(ShardedLock::new(1)); - assert!(!arc.is_poisoned()); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.write().unwrap(); - panic!(); - }) - .join(); - assert!(arc.write().is_err()); - assert!(arc.is_poisoned()); -} - -#[test] -fn arc_no_poison_rr() { - let arc = Arc::new(ShardedLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.read().unwrap(); - panic!(); - }) - .join(); - let lock = arc.read().unwrap(); - assert_eq!(*lock, 1); -} -#[test] -fn arc_no_poison_sl() { - let arc = Arc::new(ShardedLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = arc2.read().unwrap(); - panic!() - }) - .join(); - let lock = arc.write().unwrap(); - assert_eq!(*lock, 1); -} - -#[test] -fn arc() { - let arc = Arc::new(ShardedLock::new(0)); - let arc2 = arc.clone(); - let (tx, rx) = channel(); - - thread::spawn(move || { - let 
mut lock = arc2.write().unwrap(); - for _ in 0..10 { - let tmp = *lock; - *lock = -1; - thread::yield_now(); - *lock = tmp + 1; - } - tx.send(()).unwrap(); - }); - - // Readers try to catch the writer in the act - let mut children = Vec::new(); - for _ in 0..5 { - let arc3 = arc.clone(); - children.push(thread::spawn(move || { - let lock = arc3.read().unwrap(); - assert!(*lock >= 0); - })); - } - - // Wait for children to pass their asserts - for r in children { - assert!(r.join().is_ok()); - } - - // Wait for writer to finish - rx.recv().unwrap(); - let lock = arc.read().unwrap(); - assert_eq!(*lock, 10); -} - -#[test] -fn arc_access_in_unwind() { - let arc = Arc::new(ShardedLock::new(1)); - let arc2 = arc.clone(); - let _: Result<(), _> = thread::spawn(move || { - struct Unwinder { - i: Arc>, - } - impl Drop for Unwinder { - fn drop(&mut self) { - let mut lock = self.i.write().unwrap(); - *lock += 1; - } - } - let _u = Unwinder { i: arc2 }; - panic!(); - }) - .join(); - let lock = arc.read().unwrap(); - assert_eq!(*lock, 2); -} - -#[test] -fn unsized_type() { - let sl: &ShardedLock<[i32]> = &ShardedLock::new([1, 2, 3]); - { - let b = &mut *sl.write().unwrap(); - b[0] = 4; - b[2] = 5; - } - let comp: &[i32] = &[4, 2, 5]; - assert_eq!(&*sl.read().unwrap(), comp); -} - -#[test] -fn try_write() { - let lock = ShardedLock::new(0isize); - let read_guard = lock.read().unwrap(); - - let write_result = lock.try_write(); - match write_result { - Err(TryLockError::WouldBlock) => (), - Ok(_) => panic!("try_write should not succeed while read_guard is in scope"), - Err(_) => panic!("unexpected error"), - } - - drop(read_guard); -} - -#[test] -fn test_into_inner() { - let m = ShardedLock::new(NonCopy(10)); - assert_eq!(m.into_inner().unwrap(), NonCopy(10)); -} - -#[test] -fn test_into_inner_drop() { - struct Foo(Arc); - impl Drop for Foo { - fn drop(&mut self) { - self.0.fetch_add(1, Ordering::SeqCst); - } - } - let num_drops = Arc::new(AtomicUsize::new(0)); - let m = 
ShardedLock::new(Foo(num_drops.clone())); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - { - let _inner = m.into_inner().unwrap(); - assert_eq!(num_drops.load(Ordering::SeqCst), 0); - } - assert_eq!(num_drops.load(Ordering::SeqCst), 1); -} - -#[test] -fn test_into_inner_poison() { - let m = Arc::new(ShardedLock::new(NonCopy(10))); - let m2 = m.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = m2.write().unwrap(); - panic!("test panic in inner thread to poison ShardedLock"); - }) - .join(); - - assert!(m.is_poisoned()); - match Arc::try_unwrap(m).unwrap().into_inner() { - Err(e) => assert_eq!(e.into_inner(), NonCopy(10)), - Ok(x) => panic!("into_inner of poisoned ShardedLock is Ok: {:?}", x), - } -} - -#[test] -fn test_get_mut() { - let mut m = ShardedLock::new(NonCopy(10)); - *m.get_mut().unwrap() = NonCopy(20); - assert_eq!(m.into_inner().unwrap(), NonCopy(20)); -} - -#[test] -fn test_get_mut_poison() { - let m = Arc::new(ShardedLock::new(NonCopy(10))); - let m2 = m.clone(); - let _: Result<(), _> = thread::spawn(move || { - let _lock = m2.write().unwrap(); - panic!("test panic in inner thread to poison ShardedLock"); - }) - .join(); - - assert!(m.is_poisoned()); - match Arc::try_unwrap(m).unwrap().get_mut() { - Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)), - Ok(x) => panic!("get_mut of poisoned ShardedLock is Ok: {:?}", x), - } -} diff --git a/crossbeam-utils/tests/thread.rs b/crossbeam-utils/tests/thread.rs deleted file mode 100644 index 467454c3d..000000000 --- a/crossbeam-utils/tests/thread.rs +++ /dev/null @@ -1,215 +0,0 @@ -use std::any::Any; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::thread::sleep; -use std::time::Duration; - -use crossbeam_utils::thread; - -const THREADS: usize = 10; -const SMALL_STACK_SIZE: usize = 20; - -#[test] -fn join() { - let counter = AtomicUsize::new(0); - thread::scope(|scope| { - let handle = scope.spawn(|_| { - counter.store(1, Ordering::Relaxed); - }); - 
assert!(handle.join().is_ok()); - - let panic_handle = scope.spawn(|_| { - panic!("\"My honey is running out!\", said Pooh."); - }); - assert!(panic_handle.join().is_err()); - }) - .unwrap(); - - // There should be sufficient synchronization. - assert_eq!(1, counter.load(Ordering::Relaxed)); -} - -#[test] -fn counter() { - let counter = AtomicUsize::new(0); - thread::scope(|scope| { - for _ in 0..THREADS { - scope.spawn(|_| { - counter.fetch_add(1, Ordering::Relaxed); - }); - } - }) - .unwrap(); - - assert_eq!(THREADS, counter.load(Ordering::Relaxed)); -} - -#[test] -fn counter_builder() { - let counter = AtomicUsize::new(0); - thread::scope(|scope| { - for i in 0..THREADS { - scope - .builder() - .name(format!("child-{}", i)) - .stack_size(SMALL_STACK_SIZE) - .spawn(|_| { - counter.fetch_add(1, Ordering::Relaxed); - }) - .unwrap(); - } - }) - .unwrap(); - - assert_eq!(THREADS, counter.load(Ordering::Relaxed)); -} - -#[test] -fn counter_panic() { - let counter = AtomicUsize::new(0); - let result = thread::scope(|scope| { - scope.spawn(|_| { - panic!("\"My honey is running out!\", said Pooh."); - }); - sleep(Duration::from_millis(100)); - - for _ in 0..THREADS { - scope.spawn(|_| { - counter.fetch_add(1, Ordering::Relaxed); - }); - } - }); - - assert_eq!(THREADS, counter.load(Ordering::Relaxed)); - assert!(result.is_err()); -} - -#[test] -fn panic_twice() { - let result = thread::scope(|scope| { - scope.spawn(|_| { - sleep(Duration::from_millis(500)); - panic!("thread #1"); - }); - scope.spawn(|_| { - panic!("thread #2"); - }); - }); - - let err = result.unwrap_err(); - let vec = err - .downcast_ref::>>() - .unwrap(); - assert_eq!(2, vec.len()); - - let first = vec[0].downcast_ref::<&str>().unwrap(); - let second = vec[1].downcast_ref::<&str>().unwrap(); - assert_eq!("thread #1", *first); - assert_eq!("thread #2", *second) -} - -#[test] -fn panic_many() { - let result = thread::scope(|scope| { - scope.spawn(|_| panic!("deliberate panic #1")); - scope.spawn(|_| 
panic!("deliberate panic #2")); - scope.spawn(|_| panic!("deliberate panic #3")); - }); - - let err = result.unwrap_err(); - let vec = err - .downcast_ref::>>() - .unwrap(); - assert_eq!(3, vec.len()); - - for panic in vec.iter() { - let panic = panic.downcast_ref::<&str>().unwrap(); - assert!( - *panic == "deliberate panic #1" - || *panic == "deliberate panic #2" - || *panic == "deliberate panic #3" - ); - } -} - -#[test] -fn nesting() { - let var = "foo".to_string(); - - struct Wrapper<'a> { - var: &'a String, - } - - impl<'a> Wrapper<'a> { - fn recurse(&'a self, scope: &thread::Scope<'a>, depth: usize) { - assert_eq!(self.var, "foo"); - - if depth > 0 { - scope.spawn(move |scope| { - self.recurse(scope, depth - 1); - }); - } - } - } - - let wrapper = Wrapper { var: &var }; - - thread::scope(|scope| { - scope.spawn(|scope| { - scope.spawn(|scope| { - wrapper.recurse(scope, 5); - }); - }); - }) - .unwrap(); -} - -#[test] -fn join_nested() { - thread::scope(|scope| { - scope.spawn(|scope| { - let handle = scope.spawn(|_| 7); - - sleep(Duration::from_millis(200)); - handle.join().unwrap(); - }); - - sleep(Duration::from_millis(100)); - }) - .unwrap(); -} - -#[test] -fn scope_returns_ok() { - let result = thread::scope(|scope| scope.spawn(|_| 1234).join().unwrap()).unwrap(); - assert_eq!(result, 1234); -} - -#[cfg(unix)] -#[test] -fn as_pthread_t() { - use thread::unix::JoinHandleExt; - thread::scope(|scope| { - let handle = scope.spawn(|_scope| { - sleep(Duration::from_millis(100)); - 42 - }); - let _pthread_t = handle.as_pthread_t(); - handle.join().unwrap(); - }) - .unwrap(); -} - -#[cfg(windows)] -#[test] -fn as_raw_handle() { - use std::os::windows::io::AsRawHandle; - thread::scope(|scope| { - let handle = scope.spawn(|_scope| { - sleep(Duration::from_millis(100)); - 42 - }); - let _raw_handle = handle.as_raw_handle(); - handle.join().unwrap(); - }) - .unwrap(); -} diff --git a/crossbeam-utils/tests/wait_group.rs b/crossbeam-utils/tests/wait_group.rs deleted 
file mode 100644 index 5b549b849..000000000 --- a/crossbeam-utils/tests/wait_group.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::sync::mpsc; -use std::thread; -use std::time::Duration; - -use crossbeam_utils::sync::WaitGroup; - -const THREADS: usize = 10; - -#[test] -fn wait() { - let wg = WaitGroup::new(); - let (tx, rx) = mpsc::channel(); - - for _ in 0..THREADS { - let wg = wg.clone(); - let tx = tx.clone(); - - thread::spawn(move || { - wg.wait(); - tx.send(()).unwrap(); - }); - } - - thread::sleep(Duration::from_millis(100)); - - // At this point, all spawned threads should be blocked, so we shouldn't get anything from the - // channel. - assert!(rx.try_recv().is_err()); - - wg.wait(); - - // Now, the wait group is cleared and we should receive messages. - for _ in 0..THREADS { - rx.recv().unwrap(); - } -} - -#[test] -fn wait_and_drop() { - let wg = WaitGroup::new(); - let wg2 = WaitGroup::new(); - let (tx, rx) = mpsc::channel(); - - for _ in 0..THREADS { - let wg = wg.clone(); - let wg2 = wg2.clone(); - let tx = tx.clone(); - - thread::spawn(move || { - wg2.wait(); - tx.send(()).unwrap(); - drop(wg); - }); - } - - // At this point, no thread has gotten past `wg2.wait()`, so we shouldn't get anything from the - // channel. - assert!(rx.try_recv().is_err()); - drop(wg2); - - wg.wait(); - - // Now, the wait group is cleared and we should receive messages. 
- for _ in 0..THREADS { - rx.try_recv().unwrap(); - } -} diff --git a/crossbeam-skiplist/examples/simple.rs b/examples/simple.rs similarity index 92% rename from crossbeam-skiplist/examples/simple.rs rename to examples/simple.rs index 979d74288..7d93341e4 100644 --- a/crossbeam-skiplist/examples/simple.rs +++ b/examples/simple.rs @@ -1,7 +1,7 @@ // use std::time::Instant; fn main() { - // let map = crossbeam_skiplist::SkipMap::new(); + // let map = crossbeam_skiplist_fd::SkipMap::new(); // // let mut map = std::collections::BTreeMap::new(); // // let mut map = std::collections::HashMap::new(); // diff --git a/no_atomic.rs b/no_atomic.rs deleted file mode 100644 index f7e6d2fa4..000000000 --- a/no_atomic.rs +++ /dev/null @@ -1,9 +0,0 @@ -// This file is @generated by no_atomic.sh. -// It is not intended for manual editing. - -const NO_ATOMIC: &[&str] = &[ - "bpfeb-unknown-none", - "bpfel-unknown-none", - "mipsel-sony-psx", - "msp430-none-elf", -]; diff --git a/crossbeam-skiplist/src/base.rs b/src/base.rs similarity index 86% rename from crossbeam-skiplist/src/base.rs rename to src/base.rs index 1b4495e35..7b6d0f063 100644 --- a/crossbeam-skiplist/src/base.rs +++ b/src/base.rs @@ -1,6 +1,5 @@ //! A lock-free skip list. See [`SkipList`]. -use super::equivalent::Comparable; use alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout}; use core::cmp; use core::fmt; @@ -9,6 +8,9 @@ use core::mem; use core::ops::{Bound, Deref, Index, RangeBounds}; use core::ptr; use core::sync::atomic::{fence, AtomicUsize, Ordering}; +use dbutils::equivalentor::Ascend; +use dbutils::equivalentor::Comparator; +use dbutils::equivalentor::QueryComparator; use crossbeam_epoch::{self as epoch, Atomic, Collector, Guard, Shared}; use crossbeam_utils::CachePadded; @@ -233,7 +235,7 @@ impl Node { /// Decrements the reference count of a node, pinning the thread and destroying the node /// if the count become zero. 
#[inline] - unsafe fn decrement_with_pin(&self, parent: &SkipList, pin: F) + unsafe fn decrement_with_pin(&self, parent: &SkipList, pin: F) where F: FnOnce() -> Guard, { @@ -322,7 +324,7 @@ struct HotData { // As a further future optimization, if `!mem::needs_drop::() && !mem::needs_drop::()` // (neither key nor the value have destructors), there's no point in creating a new local // collector, so we should simply use the global one. -pub struct SkipList { +pub struct SkipList { /// The head of the skip list (just a dummy node, not a real entry). head: Head, @@ -331,10 +333,11 @@ pub struct SkipList { /// Hot data associated with the skip list, stored in a dedicated cache line. hot_data: CachePadded, + cmp: C, } -unsafe impl Send for SkipList {} -unsafe impl Sync for SkipList {} +unsafe impl Send for SkipList {} +unsafe impl Sync for SkipList {} impl SkipList { /// Returns a new, empty skip list. @@ -347,6 +350,23 @@ impl SkipList { len: AtomicUsize::new(0), max_height: AtomicUsize::new(1), }), + cmp: Ascend, + } + } +} + +impl SkipList { + /// Returns a new, empty skip list. + pub fn with_comparator(collector: Collector, cmp: C) -> Self { + Self { + head: Head::new(), + collector, + hot_data: CachePadded::new(HotData { + seed: AtomicUsize::new(1), + len: AtomicUsize::new(0), + max_height: AtomicUsize::new(1), + }), + cmp, } } @@ -380,12 +400,71 @@ impl SkipList { } } -impl SkipList +impl SkipList { + /// Returns an iterator over all entries in the skip list. + pub fn iter<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Iter<'a, 'g, K, V, C> { + self.check_guard(guard); + Iter { + parent: self, + head: None, + tail: None, + guard, + } + } + + /// Returns an iterator over all entries in the skip list. + pub fn ref_iter(&self) -> RefIter<'_, K, V, C> { + RefIter { + parent: self, + head: None, + tail: None, + } + } + + /// Returns an iterator over a subset of entries in the skip list. 
+ pub fn range<'a: 'g, 'g, Q, R>( + &'a self, + range: R, + guard: &'g Guard, + ) -> Range<'a, 'g, Q, R, K, V, C> + where + R: RangeBounds, + Q: ?Sized, + { + self.check_guard(guard); + Range { + parent: self, + head: None, + tail: None, + range, + guard, + _marker: PhantomData, + } + } + + /// Returns an iterator over a subset of entries in the skip list. + #[allow(clippy::needless_lifetimes)] + pub fn ref_range<'a, Q, R>(&'a self, range: R) -> RefRange<'a, Q, R, K, V, C> + where + R: RangeBounds, + Q: ?Sized, + { + RefRange { + parent: self, + range, + head: None, + tail: None, + _marker: PhantomData, + } + } +} + +impl SkipList where - K: Ord, + C: Comparator, { /// Returns the entry with the smallest key. - pub fn front<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option> { + pub fn front<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option> { self.check_guard(guard); let n = self.next_node(&self.head, Bound::Unbounded, guard)?; Some(Entry { @@ -396,9 +475,9 @@ where } /// Returns the entry with the largest key. - pub fn back<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option> { + pub fn back<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option> { self.check_guard(guard); - let n = self.search_bound::(Bound::Unbounded, true, guard)?; + let n = self.search_bound::(Bound::Unbounded, true, guard, &&self.cmp)?; Some(Entry { parent: self, node: n, @@ -409,21 +488,21 @@ where /// Returns `true` if the map contains a value for the specified key. pub fn contains_key(&self, key: &Q, guard: &Guard) -> bool where - K: Comparable, + C: QueryComparator, Q: ?Sized, { self.get(key, guard).is_some() } /// Returns an entry with the specified `key`. 
- pub fn get<'a: 'g, 'g, Q>(&'a self, key: &Q, guard: &'g Guard) -> Option> + pub fn get<'a: 'g, 'g, Q>(&'a self, key: &Q, guard: &'g Guard) -> Option> where - K: Comparable, + C: QueryComparator, Q: ?Sized, { self.check_guard(guard); - let n = self.search_bound(Bound::Included(key), false, guard)?; - if !n.key.equivalent(key) { + let n = self.search_bound::(Bound::Included(key), false, guard, &self.cmp)?; + if !self.cmp.query_equivalent(&n.key, key) { return None; } @@ -441,13 +520,13 @@ where &'a self, bound: Bound<&Q>, guard: &'g Guard, - ) -> Option> + ) -> Option> where - K: Comparable, + C: QueryComparator, Q: ?Sized, { self.check_guard(guard); - let n = self.search_bound(bound, false, guard)?; + let n = self.search_bound(bound, false, guard, &self.cmp)?; Some(Entry { parent: self, node: n, @@ -462,13 +541,13 @@ where &'a self, bound: Bound<&Q>, guard: &'g Guard, - ) -> Option> + ) -> Option> where - K: Comparable, + C: QueryComparator, Q: ?Sized, { self.check_guard(guard); - let n = self.search_bound(bound, true, guard)?; + let n = self.search_bound(bound, true, guard, &self.cmp)?; Some(Entry { parent: self, node: n, @@ -477,7 +556,7 @@ where } /// Finds an entry with the specified key, or inserts a new `key`-`value` pair if none exist. - pub fn get_or_insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { + pub fn get_or_insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V, C> { self.insert_internal(key, || value, |_| false, guard) } @@ -489,72 +568,13 @@ where /// discarded. 
If closure is modifying some other state (such as shared counters or shared /// objects), it may lead to undesired behaviour such as counters being changed without /// result of closure inserted - pub fn get_or_insert_with(&self, key: K, value: F, guard: &Guard) -> RefEntry<'_, K, V> + pub fn get_or_insert_with(&self, key: K, value: F, guard: &Guard) -> RefEntry<'_, K, V, C> where F: FnOnce() -> V, { self.insert_internal(key, value, |_| false, guard) } - /// Returns an iterator over all entries in the skip list. - pub fn iter<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Iter<'a, 'g, K, V> { - self.check_guard(guard); - Iter { - parent: self, - head: None, - tail: None, - guard, - } - } - - /// Returns an iterator over all entries in the skip list. - pub fn ref_iter(&self) -> RefIter<'_, K, V> { - RefIter { - parent: self, - head: None, - tail: None, - } - } - - /// Returns an iterator over a subset of entries in the skip list. - pub fn range<'a: 'g, 'g, Q, R>( - &'a self, - range: R, - guard: &'g Guard, - ) -> Range<'a, 'g, Q, R, K, V> - where - K: Comparable, - R: RangeBounds, - Q: ?Sized, - { - self.check_guard(guard); - Range { - parent: self, - head: None, - tail: None, - range, - guard, - _marker: PhantomData, - } - } - - /// Returns an iterator over a subset of entries in the skip list. - #[allow(clippy::needless_lifetimes)] - pub fn ref_range<'a, Q, R>(&'a self, range: R) -> RefRange<'a, Q, R, K, V> - where - K: Comparable, - R: RangeBounds, - Q: ?Sized, - { - RefRange { - parent: self, - range, - head: None, - tail: None, - _marker: PhantomData, - } - } - /// Generates a random height and returns it. fn random_height(&self) -> usize { // Pseudorandom number generation from "Xorshift RNGs" by George Marsaglia. @@ -638,7 +658,10 @@ where pred: &'a Tower, lower_bound: Bound<&K>, guard: &'a Guard, - ) -> Option<&'a Node> { + ) -> Option<&'a Node> + where + C: Comparator, + { unsafe { // Load the level 0 successor of the current node. 
let mut curr = pred[0].load_consume(guard); @@ -646,7 +669,7 @@ where // If `curr` is marked, that means `pred` is removed and we have to use // a key search. if curr.tag() == 1 { - return self.search_bound(lower_bound, false, guard); + return self.search_bound(lower_bound, false, guard, &&self.cmp); } while let Some(c) = curr.as_ref() { @@ -660,7 +683,7 @@ where } else { // On failure, we cannot do anything reasonable to continue // searching from the current position. Restart the search. - return self.search_bound(lower_bound, false, guard); + return self.search_bound(lower_bound, false, guard, &&self.cmp); } } @@ -679,14 +702,15 @@ where /// /// This is unsafe because the returned nodes are bound to the lifetime of /// the `SkipList`, not the `Guard`. - fn search_bound<'a, Q>( + fn search_bound<'a, Q, COMP>( &'a self, bound: Bound<&Q>, upper_bound: bool, guard: &'a Guard, + cmp: &COMP, ) -> Option<&'a Node> where - K: Comparable, + COMP: QueryComparator, Q: ?Sized, { unsafe { @@ -745,11 +769,11 @@ where // bound, we return the last node before the condition became true. For the // lower bound, we return the first node after the condition became true. if upper_bound { - if !below_upper_bound(&bound, &c.key) { + if !below_upper_bound(cmp, &bound, &c.key) { break; } result = Some(c); - } else if above_lower_bound(&bound, &c.key) { + } else if above_lower_bound(cmp, &bound, &c.key) { result = Some(c); break; } @@ -766,10 +790,15 @@ where } /// Searches for a key in the skip list and returns a list of all adjacent nodes. - fn search_position<'a, Q>(&'a self, key: &Q, guard: &'a Guard) -> Position<'a, K, V> + fn search_position<'a, Q, COMP>( + &'a self, + key: &Q, + guard: &'a Guard, + cmp: &COMP, + ) -> Position<'a, K, V> where - K: Comparable, Q: ?Sized, + COMP: QueryComparator, { unsafe { 'search: loop { @@ -826,7 +855,7 @@ where // If `curr` contains a key that is greater than or equal to `key`, we're // done with this level. 
- match c.key.compare(key) { + match cmp.query_compare(&c.key, key) { cmp::Ordering::Greater => break, cmp::Ordering::Equal => { result.found = Some(c); @@ -859,10 +888,11 @@ where value: F, replace: CompareF, guard: &Guard, - ) -> RefEntry<'_, K, V> + ) -> RefEntry<'_, K, V, C> where F: FnOnce() -> V, CompareF: Fn(&V) -> bool, + C: Comparator, { self.check_guard(guard); @@ -874,7 +904,7 @@ where // First try searching for the key. // Note that the `Ord` implementation for `K` may panic during the search. - let mut search = self.search_position(&key, guard); + let mut search = self.search_position(&key, guard, &&self.cmp); if let Some(r) = search.found { let replace = replace(&r.value); if !replace { @@ -941,7 +971,7 @@ where } } let sg = ScopeGuard(node.as_raw()); - search = self.search_position(&n.key, guard); + search = self.search_position(&n.key, guard, &&self.cmp); mem::forget(sg); } @@ -1006,9 +1036,11 @@ where // the tower without breaking any invariants. Note that building higher levels // is completely optional. Only the lowest level really matters, and all the // higher levels are there just to make searching faster. - if succ.as_ref().map(|s| &s.key) == Some(&n.key) { - search = self.search_position(&n.key, guard); - continue; + if let Some(k) = succ.as_ref().map(|s| &s.key) { + if self.cmp.equivalent(k, &n.key) { + search = self.search_position(&n.key, guard, &&self.cmp); + continue; + } } // Change the pointer at the current level from `next` to `succ`. If this CAS @@ -1047,7 +1079,7 @@ where // any invariants. Note that building higher levels is completely optional. // Only the lowest level really matters, and all the higher levels are there // just to make searching faster. - search = self.search_position(&n.key, guard); + search = self.search_position(&n.key, guard, &&self.cmp); } } @@ -1059,7 +1091,7 @@ where // level. // TODO(Amanieu): can we use relaxed ordering here? 
if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 { - self.search_bound(Bound::Included(&n.key), false, guard); + self.search_bound(Bound::Included(&n.key), false, guard, &&self.cmp); } // Finally, return the new entry. @@ -1068,16 +1100,17 @@ where } } -impl SkipList +impl SkipList where - K: Ord + Send + 'static, + K: Send + 'static, V: Send + 'static, + C: Comparator + Send + 'static, { /// Inserts a `key`-`value` pair into the skip list and returns the new entry. /// /// If there is an existing entry with this key, it will be removed before inserting the new /// one. - pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V> { + pub fn insert(&self, key: K, value: V, guard: &Guard) -> RefEntry<'_, K, V, C> { self.insert_internal(key, || value, |_| true, guard) } @@ -1092,7 +1125,7 @@ where value: V, compare_fn: F, guard: &Guard, - ) -> RefEntry<'_, K, V> + ) -> RefEntry<'_, K, V, C> where F: Fn(&V) -> bool, { @@ -1100,10 +1133,10 @@ where } /// Removes an entry with the specified `key` from the map and returns it. - pub fn remove(&self, key: &Q, guard: &Guard) -> Option> + pub fn remove(&self, key: &Q, guard: &Guard) -> Option> where - K: Comparable, Q: ?Sized, + C: QueryComparator, { self.check_guard(guard); @@ -1115,7 +1148,7 @@ where loop { // Try searching for the key. - let search = self.search_position(key, guard); + let search = self.search_position(key, guard, &self.cmp); let n = search.found?; @@ -1154,7 +1187,7 @@ where n.decrement(guard); } else { // Failed! Just repeat the search to completely unlink the node. - self.search_bound(Bound::Included(key), false, guard); + self.search_bound(Bound::Included(key), false, guard, &self.cmp); break; } } @@ -1169,7 +1202,10 @@ where } /// Removes an entry from the front of the skip list. 
- pub fn pop_front(&self, guard: &Guard) -> Option> { + pub fn pop_front(&self, guard: &Guard) -> Option> + where + C: Comparator, + { self.check_guard(guard); loop { let e = self.front(guard)?; @@ -1184,7 +1220,7 @@ where } /// Removes an entry from the back of the skip list. - pub fn pop_back(&self, guard: &Guard) -> Option> { + pub fn pop_back(&self, guard: &Guard) -> Option> { self.check_guard(guard); loop { let e = self.back(guard)?; @@ -1199,7 +1235,10 @@ where } /// Iterates over the map and removes every entry. - pub fn clear(&self, guard: &mut Guard) { + pub fn clear(&self, guard: &mut Guard) + where + C: Comparator, + { self.check_guard(guard); /// Number of steps after which we repin the current thread and unlink removed nodes. @@ -1213,7 +1252,14 @@ where // By unlinking nodes in batches we make sure that the final search doesn't // unlink all nodes at once, which could keep the current thread pinned for a // long time. - let mut entry = self.lower_bound::(Bound::Unbounded, guard); + let mut entry = { + self.search_bound(Bound::Unbounded, false, guard, &&self.cmp) + .map(|n| Entry { + parent: self, + node: n, + guard, + }) + }; for _ in 0..BATCH_SIZE { // Stop if we have reached the end of the list. @@ -1242,7 +1288,7 @@ where } } -impl Drop for SkipList { +impl Drop for SkipList { fn drop(&mut self) { unsafe { let mut node = self.head[0] @@ -1266,9 +1312,9 @@ impl Drop for SkipList { } } -impl fmt::Debug for SkipList +impl fmt::Debug for SkipList where - K: Ord + fmt::Debug, + K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -1276,7 +1322,7 @@ where } } -impl IntoIterator for SkipList { +impl IntoIterator for SkipList { type Item = (K, V); type IntoIter = IntoIter; @@ -1307,13 +1353,13 @@ impl IntoIterator for SkipList { /// The lifetimes of the key and value are the same as that of the `Guard` /// used when creating the `Entry` (`'g`). This lifetime is also constrained to /// not outlive the `SkipList`. 
-pub struct Entry<'a: 'g, 'g, K, V> { - parent: &'a SkipList, +pub struct Entry<'a: 'g, 'g, K, V, C> { + parent: &'a SkipList, node: &'g Node, guard: &'g Guard, } -impl<'a: 'g, 'g, K: 'a, V: 'a> Entry<'a, 'g, K, V> { +impl<'a: 'g, 'g, K: 'a, V: 'a, C: 'a> Entry<'a, 'g, K, V, C> { /// Returns `true` if the entry is removed from the skip list. pub fn is_removed(&self) -> bool { self.node.is_removed() @@ -1330,7 +1376,7 @@ impl<'a: 'g, 'g, K: 'a, V: 'a> Entry<'a, 'g, K, V> { } /// Returns a reference to the parent `SkipList` - pub fn skiplist(&self) -> &'a SkipList { + pub fn skiplist(&self) -> &'a SkipList { self.parent } @@ -1339,15 +1385,16 @@ impl<'a: 'g, 'g, K: 'a, V: 'a> Entry<'a, 'g, K, V> { /// /// This method may return `None` if the reference count is already 0 and /// the node has been queued for deletion. - pub fn pin(&self) -> Option> { + pub fn pin(&self) -> Option> { unsafe { RefEntry::try_acquire(self.parent, self.node) } } } -impl Entry<'_, '_, K, V> +impl Entry<'_, '_, K, V, C> where - K: Ord + Send + 'static, + K: Send + 'static, V: Send + 'static, + C: Comparator + Send + 'static, { /// Removes the entry from the skip list. /// @@ -1359,8 +1406,12 @@ where self.parent.hot_data.len.fetch_sub(1, Ordering::Relaxed); // Search for the key to unlink the node from the skip list. 
- self.parent - .search_bound(Bound::Included(&self.node.key), false, self.guard); + self.parent.search_bound( + Bound::Included(&self.node.key), + false, + self.guard, + &&self.parent.cmp, + ); true } else { @@ -1369,7 +1420,7 @@ where } } -impl Clone for Entry<'_, '_, K, V> { +impl Clone for Entry<'_, '_, K, V, C> { fn clone(&self) -> Self { Self { parent: self.parent, @@ -1379,7 +1430,7 @@ impl Clone for Entry<'_, '_, K, V> { } } -impl fmt::Debug for Entry<'_, '_, K, V> +impl fmt::Debug for Entry<'_, '_, K, V, C> where K: fmt::Debug, V: fmt::Debug, @@ -1392,9 +1443,9 @@ where } } -impl<'a: 'g, 'g, K, V> Entry<'a, 'g, K, V> +impl<'a: 'g, 'g, K, V, C> Entry<'a, 'g, K, V, C> where - K: Ord, + C: Comparator, { /// Moves to the next entry in the skip list. pub fn move_next(&mut self) -> bool { @@ -1408,7 +1459,7 @@ where } /// Returns the next entry in the skip list. - pub fn next(&self) -> Option> { + pub fn next(&self) -> Option> { let n = self.parent.next_node( &self.node.tower, Bound::Excluded(&self.node.key), @@ -1433,10 +1484,13 @@ where } /// Returns the previous entry in the skip list. - pub fn prev(&self) -> Option> { - let n = self - .parent - .search_bound(Bound::Excluded(&self.node.key), true, self.guard)?; + pub fn prev(&self) -> Option> { + let n = self.parent.search_bound( + Bound::Excluded(&self.node.key), + true, + self.guard, + &&self.parent.cmp, + )?; Some(Entry { parent: self.parent, node: n, @@ -1449,12 +1503,12 @@ where /// /// You *must* call `release` to free this type, otherwise the node will be /// leaked. This is because releasing the entry requires a `Guard`. -pub struct RefEntry<'a, K, V> { - parent: &'a SkipList, +pub struct RefEntry<'a, K, V, C> { + parent: &'a SkipList, node: &'a Node, } -impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> { +impl<'a, K: 'a, V: 'a, C: 'a> RefEntry<'a, K, V, C> { /// Returns `true` if the entry is removed from the skip list. 
pub fn is_removed(&self) -> bool { self.node.is_removed() @@ -1470,8 +1524,13 @@ impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> { &self.node.value } + /// Returns a reference to the comparator. + pub fn comparator(&self) -> &'a C { + &self.parent.cmp + } + /// Returns a reference to the parent `SkipList` - pub fn skiplist(&self) -> &'a SkipList { + pub fn skiplist(&self) -> &'a SkipList { self.parent } @@ -1493,9 +1552,9 @@ impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> { /// Tries to create a new `RefEntry` by incrementing the reference count of /// a node. unsafe fn try_acquire( - parent: &'a SkipList, + parent: &'a SkipList, node: &Node, - ) -> Option> { + ) -> Option> { if unsafe { node.try_increment() } { Some(RefEntry { parent, @@ -1510,10 +1569,11 @@ impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> { } } -impl RefEntry<'_, K, V> +impl RefEntry<'_, K, V, C> where - K: Ord + Send + 'static, + K: Send + 'static, V: Send + 'static, + C: Comparator + Send + 'static, { /// Removes the entry from the skip list. /// @@ -1527,8 +1587,12 @@ where self.parent.hot_data.len.fetch_sub(1, Ordering::Relaxed); // Search for the key to unlink the node from the skip list. - self.parent - .search_bound(Bound::Included(&self.node.key), false, guard); + self.parent.search_bound( + Bound::Included(&self.node.key), + false, + guard, + &&self.parent.cmp, + ); true } else { @@ -1537,7 +1601,7 @@ where } } -impl Clone for RefEntry<'_, K, V> { +impl Clone for RefEntry<'_, K, V, C> { fn clone(&self) -> Self { unsafe { // Incrementing will always succeed since we're already holding a reference to the node. @@ -1550,7 +1614,7 @@ impl Clone for RefEntry<'_, K, V> { } } -impl fmt::Debug for RefEntry<'_, K, V> +impl fmt::Debug for RefEntry<'_, K, V, C> where K: fmt::Debug, V: fmt::Debug, @@ -1563,9 +1627,9 @@ where } } -impl<'a, K, V> RefEntry<'a, K, V> +impl<'a, K, V, C> RefEntry<'a, K, V, C> where - K: Ord, + C: Comparator, { /// Moves to the next entry in the skip list. 
pub fn move_next(&mut self, guard: &Guard) -> bool { @@ -1579,7 +1643,7 @@ where } /// Returns the next entry in the skip list. - pub fn next(&self, guard: &Guard) -> Option> { + pub fn next(&self, guard: &Guard) -> Option> { self.parent.check_guard(guard); unsafe { let mut n = self.node; @@ -1606,14 +1670,17 @@ where } /// Returns the previous entry in the skip list. - pub fn prev(&self, guard: &Guard) -> Option> { + pub fn prev(&self, guard: &Guard) -> Option> { self.parent.check_guard(guard); unsafe { let mut n = self.node; loop { - n = self - .parent - .search_bound(Bound::Excluded(&n.key), true, guard)?; + n = self.parent.search_bound( + Bound::Excluded(&n.key), + true, + guard, + &&self.parent.cmp, + )?; if let Some(e) = RefEntry::try_acquire(self.parent, n) { return Some(e); } @@ -1623,20 +1690,20 @@ where } /// An iterator over the entries of a `SkipList`. -pub struct Iter<'a: 'g, 'g, K, V> { - parent: &'a SkipList, +pub struct Iter<'a: 'g, 'g, K, V, C> { + parent: &'a SkipList, head: Option<&'g Node>, tail: Option<&'g Node>, guard: &'g Guard, } -impl<'a: 'g, 'g, K: 'a, V: 'a> Iterator for Iter<'a, 'g, K, V> +impl<'a: 'g, 'g, K: 'a, V: 'a, C: 'a> Iterator for Iter<'a, 'g, K, V, C> where - K: Ord, + C: Comparator, { - type Item = Entry<'a, 'g, K, V>; + type Item = Entry<'a, 'g, K, V, C>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option> { self.head = match self.head { Some(n) => self .parent @@ -1646,7 +1713,7 @@ where .next_node(&self.parent.head, Bound::Unbounded, self.guard), }; if let (Some(h), Some(t)) = (self.head, self.tail) { - if h.key >= t.key { + if self.parent.cmp.compare(&h.key, &t.key).is_ge() { self.head = None; self.tail = None; } @@ -1659,21 +1726,24 @@ where } } -impl<'a: 'g, 'g, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, 'g, K, V> +impl<'a: 'g, 'g, K: 'a, V: 'a, C: 'a> DoubleEndedIterator for Iter<'a, 'g, K, V, C> where - K: Ord, + C: Comparator, { - fn next_back(&mut self) -> Option> { + fn next_back(&mut self) -> 
Option> { self.tail = match self.tail { - Some(n) => self - .parent - .search_bound(Bound::Excluded(&n.key), true, self.guard), + Some(n) => self.parent.search_bound( + Bound::Excluded(&n.key), + true, + self.guard, + &&self.parent.cmp, + ), None => self .parent - .search_bound::(Bound::Unbounded, true, self.guard), + .search_bound(Bound::Unbounded, true, self.guard, &&self.parent.cmp), }; if let (Some(h), Some(t)) = (self.head, self.tail) { - if h.key >= t.key { + if self.parent.cmp.compare(&h.key, &t.key).is_ge() { self.head = None; self.tail = None; } @@ -1686,7 +1756,7 @@ where } } -impl fmt::Debug for Iter<'_, '_, K, V> +impl fmt::Debug for Iter<'_, '_, K, V, C> where K: fmt::Debug, V: fmt::Debug, @@ -1700,13 +1770,13 @@ where } /// An iterator over reference-counted entries of a `SkipList`. -pub struct RefIter<'a, K, V> { - parent: &'a SkipList, - head: Option>, - tail: Option>, +pub struct RefIter<'a, K, V, C> { + parent: &'a SkipList, + head: Option>, + tail: Option>, } -impl fmt::Debug for RefIter<'_, K, V> +impl fmt::Debug for RefIter<'_, K, V, C> where K: fmt::Debug, V: fmt::Debug, @@ -1725,12 +1795,12 @@ where } } -impl<'a, K: 'a, V: 'a> RefIter<'a, K, V> +impl<'a, K: 'a, V: 'a, C: 'a> RefIter<'a, K, V, C> where - K: Ord, + C: Comparator, { /// Advances the iterator and returns the next value. - pub fn next(&mut self, guard: &Guard) -> Option> { + pub fn next(&mut self, guard: &Guard) -> Option> { self.parent.check_guard(guard); let next_head = match &self.head { Some(e) => e.next(guard), @@ -1738,7 +1808,7 @@ where }; match (&next_head, &self.tail) { // The next key is larger than the latest tail key we observed with this iterator. - (Some(ref next), Some(t)) if next.key() >= t.key() => { + (Some(ref next), Some(t)) if self.parent.cmp.compare(next.key(), t.key()).is_ge() => { unsafe { next.node.decrement(guard); } @@ -1757,7 +1827,7 @@ where } /// Removes and returns an element from the end of the iterator. 
- pub fn next_back(&mut self, guard: &Guard) -> Option> { + pub fn next_back(&mut self, guard: &Guard) -> Option> { self.parent.check_guard(guard); let next_tail = match &self.tail { Some(e) => e.prev(guard), @@ -1765,7 +1835,7 @@ where }; match (&self.head, &next_tail) { // The prev key is smaller than the latest head key we observed with this iterator. - (Some(h), Some(next)) if h.key() >= next.key() => { + (Some(h), Some(next)) if self.parent.cmp.compare(h.key(), next.key()).is_ge() => { unsafe { next.node.decrement(guard); } @@ -1784,7 +1854,7 @@ where } } -impl<'a, K: 'a, V: 'a> RefIter<'a, K, V> { +impl<'a, K: 'a, V: 'a, C: 'a> RefIter<'a, K, V, C> { /// Decrements the reference count of `RefEntry` owned by the iterator. pub fn drop_impl(&mut self, guard: &Guard) { self.parent.check_guard(guard); @@ -1798,13 +1868,12 @@ impl<'a, K: 'a, V: 'a> RefIter<'a, K, V> { } /// An iterator over a subset of entries of a `SkipList`. -pub struct Range<'a: 'g, 'g, Q, R, K, V> +pub struct Range<'a: 'g, 'g, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, { - parent: &'a SkipList, + parent: &'a SkipList, head: Option<&'g Node>, tail: Option<&'g Node>, range: R, @@ -1812,35 +1881,38 @@ where _marker: PhantomData Q>, // covariant over `Q` } -impl<'a: 'g, 'g, Q, R, K: 'a, V: 'a> Iterator for Range<'a, 'g, Q, R, K, V> +impl<'a: 'g, 'g, Q, R, K: 'a, V: 'a, C: 'a> Iterator for Range<'a, 'g, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, + C: QueryComparator, { - type Item = Entry<'a, 'g, K, V>; + type Item = Entry<'a, 'g, K, V, C>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option> { self.head = match self.head { Some(n) => self .parent .next_node(&n.tower, Bound::Excluded(&n.key), self.guard), - None => self - .parent - .search_bound(self.range.start_bound(), false, self.guard), + None => self.parent.search_bound( + self.range.start_bound(), + false, + self.guard, + &self.parent.cmp, + ), }; if let Some(h) = self.head { 
match self.tail { Some(t) => { let bound = Bound::Excluded(&t.key); - if !below_upper_bound(&bound, &h.key) { + if !below_upper_bound(&&self.parent.cmp, &bound, &h.key) { self.head = None; self.tail = None; } } None => { let bound = self.range.end_bound(); - if !below_upper_bound(&bound, &h.key) { + if !below_upper_bound(&self.parent.cmp, &bound, &h.key) { self.head = None; self.tail = None; } @@ -1855,33 +1927,37 @@ where } } -impl<'a: 'g, 'g, Q, R, K: 'a, V: 'a> DoubleEndedIterator for Range<'a, 'g, Q, R, K, V> +impl<'a: 'g, 'g, Q, R, K: 'a, V: 'a, C: 'a> DoubleEndedIterator for Range<'a, 'g, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, + C: QueryComparator, { - fn next_back(&mut self) -> Option> { + fn next_back(&mut self) -> Option> { self.tail = match self.tail { - Some(n) => self - .parent - .search_bound::(Bound::Excluded(&n.key), true, self.guard), - None => self - .parent - .search_bound(self.range.end_bound(), true, self.guard), + Some(n) => self.parent.search_bound( + Bound::Excluded(&n.key), + true, + self.guard, + &&self.parent.cmp, + ), + None => { + self.parent + .search_bound(self.range.end_bound(), true, self.guard, &self.parent.cmp) + } }; if let Some(t) = self.tail { match self.head { Some(h) => { let bound = Bound::Excluded(&h.key); - if !above_lower_bound(&bound, &t.key) { + if !above_lower_bound(&&self.parent.cmp, &bound, &t.key) { self.head = None; self.tail = None; } } None => { let bound = self.range.start_bound(); - if !above_lower_bound(&bound, &t.key) { + if !above_lower_bound(&self.parent.cmp, &bound, &t.key) { self.head = None; self.tail = None; } @@ -1896,9 +1972,9 @@ where } } -impl fmt::Debug for Range<'_, '_, Q, R, K, V> +impl fmt::Debug for Range<'_, '_, Q, R, K, V, C> where - K: Ord + fmt::Debug + Comparable, + K: fmt::Debug, V: fmt::Debug, R: RangeBounds + fmt::Debug, Q: ?Sized, @@ -1913,38 +1989,35 @@ where } /// An iterator over reference-counted subset of entries of a `SkipList`. 
-pub struct RefRange<'a, Q, R, K, V> +pub struct RefRange<'a, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, { - parent: &'a SkipList, - pub(crate) head: Option>, - pub(crate) tail: Option>, + parent: &'a SkipList, + pub(crate) head: Option>, + pub(crate) tail: Option>, pub(crate) range: R, _marker: PhantomData Q>, // covariant over `Q` } -unsafe impl Send for RefRange<'_, Q, R, K, V> +unsafe impl Send for RefRange<'_, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, { } -unsafe impl Sync for RefRange<'_, Q, R, K, V> +unsafe impl Sync for RefRange<'_, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, { } -impl fmt::Debug for RefRange<'_, Q, R, K, V> +impl fmt::Debug for RefRange<'_, Q, R, K, V, C> where - K: Ord + fmt::Debug + Comparable, + K: fmt::Debug, V: fmt::Debug, R: RangeBounds + fmt::Debug, Q: ?Sized, @@ -1958,14 +2031,14 @@ where } } -impl<'a, Q, R, K: 'a, V: 'a> RefRange<'a, Q, R, K, V> +impl<'a, Q, R, K: 'a, V: 'a, C: 'a> RefRange<'a, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, + C: QueryComparator, { /// Advances the iterator and returns the next value. - pub fn next(&mut self, guard: &Guard) -> Option> { + pub fn next(&mut self, guard: &Guard) -> Option> { self.parent.check_guard(guard); let next_head = match self.head { Some(ref e) => e.next(guard), @@ -1976,7 +2049,7 @@ where match self.tail { Some(ref t) => { let bound = Bound::Excluded(t.key()); - if below_upper_bound(&bound, h.key()) { + if below_upper_bound(&&self.parent.cmp, &bound, h.key()) { self.head.clone_from(&next_head); next_head } else { @@ -1988,7 +2061,7 @@ where } None => { let bound = self.range.end_bound(); - if below_upper_bound(&bound, h.key()) { + if below_upper_bound(&self.parent.cmp, &bound, h.key()) { self.head.clone_from(&next_head); next_head } else { @@ -2005,7 +2078,7 @@ where } /// Removes and returns an element from the end of the iterator. 
- pub fn next_back(&mut self, guard: &Guard) -> Option> { + pub fn next_back(&mut self, guard: &Guard) -> Option> { self.parent.check_guard(guard); let next_tail = match self.tail { Some(ref e) => e.prev(guard), @@ -2016,7 +2089,7 @@ where match self.head { Some(ref h) => { let bound = Bound::Excluded(h.key()); - if above_lower_bound(&bound, t.key()) { + if above_lower_bound(&&self.parent.cmp, &bound, t.key()) { self.tail.clone_from(&next_tail); next_tail } else { @@ -2028,7 +2101,7 @@ where } None => { let bound = self.range.start_bound(); - if above_lower_bound(&bound, t.key()) { + if above_lower_bound(&self.parent.cmp, &bound, t.key()) { self.tail.clone_from(&next_tail); next_tail } else { @@ -2043,7 +2116,13 @@ where None } } +} +impl<'a, Q, R, K: 'a, V: 'a, C: 'a> RefRange<'a, Q, R, K, V, C> +where + R: RangeBounds, + Q: ?Sized, +{ /// Decrements a reference count owned by this iterator. pub fn drop_impl(&mut self, guard: &Guard) { self.parent.check_guard(guard); @@ -2126,9 +2205,9 @@ impl fmt::Debug for IntoIter { /// Helper function to retry an operation until pinning succeeds or `None` is /// returned. 
-pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V>(mut f: F) -> Option> +pub(crate) fn try_pin_loop<'a: 'g, 'g, F, K, V, C>(mut f: F) -> Option> where - F: FnMut() -> Option>, + F: FnMut() -> Option>, { loop { if let Some(e) = f()?.pin() { @@ -2138,27 +2217,29 @@ where } /// Helper function to check if a value is above a lower bound -fn above_lower_bound(bound: &Bound<&T>, other: &V) -> bool +fn above_lower_bound(cmp: &C, bound: &Bound<&T>, other: &V) -> bool where T: ?Sized, - V: Comparable, + V: ?Sized, + C: QueryComparator, { match *bound { Bound::Unbounded => true, - Bound::Included(key) => other.compare(key).is_ge(), - Bound::Excluded(key) => other.compare(key).is_gt(), + Bound::Included(key) => cmp.query_compare(other, key).is_ge(), + Bound::Excluded(key) => cmp.query_compare(other, key).is_gt(), } } /// Helper function to check if a value is below an upper bound -fn below_upper_bound(bound: &Bound<&T>, other: &V) -> bool +fn below_upper_bound(cmp: &C, bound: &Bound<&T>, other: &V) -> bool where T: ?Sized, - V: Comparable, + V: ?Sized, + C: QueryComparator, { match *bound { Bound::Unbounded => true, - Bound::Included(key) => other.compare(key).is_le(), - Bound::Excluded(key) => other.compare(key).is_lt(), + Bound::Included(key) => cmp.query_compare(other, key).is_le(), + Bound::Excluded(key) => cmp.query_compare(other, key).is_lt(), } } diff --git a/src/equivalentor.rs b/src/equivalentor.rs new file mode 100644 index 000000000..b4519645b --- /dev/null +++ b/src/equivalentor.rs @@ -0,0 +1,12 @@ +// These traits are based on `equivalent` crate, but `K` and `Q` are flipped to avoid type inference issues: +// https://github.com/indexmap-rs/equivalent/issues/5 + +//! Traits for key comparison in maps. 
+ +pub use dbutils::{ + equivalent::*, + equivalentor::{ + Ascend, Comparator, Descend, Equivalentor, QueryComparator, QueryEquivalentor, + QueryRangeComparator, RangeComparator, Reverse, + }, +}; diff --git a/src/lib.rs b/src/lib.rs index 4f5465707..77115ba21 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,42 +1,234 @@ -//! Tools for concurrent programming. +//! A long-term maintained forked version of the [`crossbeam-skiplist`](https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-skiplist) for supporting more flexible key comparison customization. //! -//! ## Atomics +//! Concurrent maps and sets based on [skip lists]. //! -//! * [`AtomicCell`], a thread-safe mutable memory location. -//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering. +//! This crate provides the types [`SkipMap`] and [`SkipSet`]. +//! These data structures provide an interface similar to [`BTreeMap`] and [`BTreeSet`], +//! respectively, except they support safe concurrent access across +//! multiple threads. //! -//! ## Data structures +//! # Concurrent access +//! [`SkipMap`] and [`SkipSet`] implement [`Send`] and [`Sync`], +//! so they can be shared across threads with ease. //! -//! * [`deque`], work-stealing deques for building task schedulers. -//! * [`ArrayQueue`], a bounded MPMC queue that allocates a fixed-capacity buffer on construction. -//! * [`SegQueue`], an unbounded MPMC queue that allocates small buffers, segments, on demand. +//! Methods which mutate the map, such as [`insert`], +//! take `&self` rather than `&mut self`. This allows +//! them to be invoked concurrently. //! -//! ## Memory management +//! ``` +//! use crossbeam_skiplist_fd::SkipMap; +//! use crossbeam_utils::thread::scope; //! -//! * [`epoch`], an epoch-based garbage collector. +//! let person_ages = SkipMap::new(); //! -//! ## Thread synchronization +//! scope(|s| { +//! // Insert entries into the map from multiple threads. +//! s.spawn(|_| { +//! 
person_ages.insert("Spike Garrett", 22); +//! person_ages.insert("Stan Hancock", 47); +//! person_ages.insert("Rea Bryan", 234); //! -//! * [`channel`], multi-producer multi-consumer channels for message passing. -//! * [`Parker`], a thread parking primitive. -//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads. -//! * [`WaitGroup`], for synchronizing the beginning or end of some computation. +//! assert_eq!(person_ages.get("Spike Garrett").unwrap().value(), &22); +//! }); +//! s.spawn(|_| { +//! person_ages.insert("Bryon Conroy", 65); +//! person_ages.insert("Lauren Reilly", 2); +//! }); +//! }).unwrap(); //! -//! ## Utilities +//! assert!(person_ages.contains_key("Spike Garrett")); +//! person_ages.remove("Rea Bryan"); +//! assert!(!person_ages.contains_key("Rea Bryan")); //! -//! * [`Backoff`], for exponential backoff in spin loops. -//! * [`CachePadded`], for padding and aligning a value to the length of a cache line. -//! * [`scope`], for spawning threads that borrow local variables from the stack. +//! ``` //! -//! [`AtomicCell`]: atomic::AtomicCell -//! [`AtomicConsume`]: atomic::AtomicConsume -//! [`ArrayQueue`]: queue::ArrayQueue -//! [`SegQueue`]: queue::SegQueue -//! [`Parker`]: sync::Parker -//! [`ShardedLock`]: sync::ShardedLock -//! [`WaitGroup`]: sync::WaitGroup -//! [`Backoff`]: utils::Backoff -//! [`CachePadded`]: utils::CachePadded +//! Concurrent access to skip lists is lock-free and sound. +//! Threads won't get blocked waiting for other threads to finish operating +//! on the map. +//! +//! Be warned that, because of this lock-freedom, it's easy to introduce +//! race conditions into your code. For example: +//! ```no_run +//! use crossbeam_skiplist_fd::SkipSet; +//! use crossbeam_utils::thread::scope; +//! +//! let numbers = SkipSet::new(); +//! scope(|s| { +//! // Spawn a thread which will remove 5 from the set. +//! s.spawn(|_| { +//! numbers.remove(&5); +//! }); +//! +//! 
// While the thread above is running, insert a value into the set. +//! numbers.insert(5); +//! +//! // This check can fail! +//! // The other thread may remove the value +//! // before we perform this check. +//! assert!(numbers.contains(&5)); +//! }).unwrap(); +//! ``` +//! +//! In effect, a _single_ operation on the map, such as [`insert`], +//! operates atomically: race conditions are impossible. However, +//! concurrent calls to functions can become interleaved across +//! threads, introducing non-determinism. +//! +//! To avoid this sort of race condition, never assume that a collection's +//! state will remain the same across multiple lines of code. For instance, +//! in the example above, the problem arises from the assumption that +//! the map won't be mutated between the calls to `insert` and `contains`. +//! In sequential code, this would be correct. But when multiple +//! threads are introduced, more care is needed. +//! +//! Note that race conditions do not violate Rust's memory safety rules. +//! A race between multiple threads can never cause memory errors or +//! segfaults. A race condition is a _logic error_ in its entirety. +//! +//! # Mutable access to elements +//! [`SkipMap`] and [`SkipSet`] provide no way to retrieve a mutable reference +//! to a value. Since access methods can be called concurrently, providing +//! e.g. a `get_mut` function could cause data races. +//! +//! A solution to the above is to have the implementation wrap +//! each value in a lock. However, this has some repercussions: +//! * The map would no longer be lock-free, inhibiting scalability +//! and allowing for deadlocks. +//! * If a user of the map doesn't need mutable access, then they pay +//! the price of locks without actually needing them. +//! +//! Instead, the approach taken by this crate gives more control to the user. +//! If mutable access is needed, then you can use interior mutability, +//! such as [`RwLock`]: `SkipMap>`. +//! +//! # Garbage collection +//! 
 A problem faced by many concurrent data structures +//! is choosing when to free unused memory. Care must be +//! taken to prevent use-after-frees and double-frees, both +//! of which cause undefined behavior. +//! +//! Consider the following sequence of events operating on a [`SkipMap`]: +//! * Thread A calls [`get`] and holds a reference to a value in the map. +//! * Thread B removes that key from the map. +//! * Thread A now attempts to access the value. +//! +//! What happens here? If the map implementation frees the memory +//! belonging to a value when it is +//! removed, then a use-after-free occurs, resulting in memory corruption. +//! +//! To solve the above, this crate uses the _epoch-based memory reclamation_ mechanism +//! implemented in [`crossbeam-epoch`]. Simplified, a value removed from the map +//! is not freed until after all references to it have been dropped. This mechanism +//! is similar to the garbage collection found in some languages, such as Java, except +//! it operates solely on the values inside the map. +//! +//! This garbage collection scheme functions automatically; users don't have to worry about it. +//! However, keep in mind that holding [`Entry`] handles to entries in the map will prevent +//! that memory from being freed until at least after the handles are dropped. +//! +//! # Performance versus B-trees +//! In general, when you need concurrent writes +//! to an ordered collection, skip lists are a reasonable choice. +//! However, they can be substantially slower than B-trees +//! in some scenarios. +//! +//! The main benefit of a skip list over a `RwLock` +//! is that it allows concurrent writes to progress without +//! mutual exclusion. However, when the frequency +//! of writes is low, this benefit isn't as useful. +//! In these cases, a shared [`BTreeMap`] may be a faster option. +//! +//! These guidelines should be taken with a grain of salt—performance +//! in practice varies depending on your use case. +//! 
In the end, the best way to choose between [`BTreeMap`] and [`SkipMap`] +//! is to benchmark them in your own application. +//! +//! # Alternatives +//! This crate implements _ordered_ maps and sets, akin to [`BTreeMap`] and [`BTreeSet`]. +//! In many situations, however, a defined order on elements is not required. For these +//! purposes, unordered maps will suffice. In addition, unordered maps +//! often have better performance characteristics than their ordered alternatives. +//! +//! Crossbeam [does not currently provide a concurrent unordered map](https://github.com/crossbeam-rs/rfcs/issues/32). +//! That said, here are some other crates which may suit you: +//! * [`DashMap`](https://docs.rs/dashmap) implements a novel concurrent hash map +//! with good performance characteristics. +//! * [`flurry`](https://docs.rs/flurry) is a Rust port of Java's `ConcurrentHashMap`. +//! +//! [`insert`]: SkipMap::insert +//! [`get`]: SkipMap::get +//! [`Entry`]: map::Entry +//! [skip lists]: https://en.wikipedia.org/wiki/Skip_list +//! [`crossbeam-epoch`]: https://docs.rs/crossbeam-epoch +//! [`BTreeMap`]: std::collections::BTreeMap +//! [`BTreeSet`]: std::collections::BTreeSet +//! [`RwLock`]: std::sync::RwLock +//! +//! # Examples +//! [`SkipMap`] basic usage: +//! ``` +//! use crossbeam_skiplist_fd::SkipMap; +//! +//! // Note that the variable doesn't have to be mutable: +//! // SkipMap methods take &self to support concurrent access. +//! let movie_reviews = SkipMap::new(); +//! +//! // Insert some key-value pairs. +//! movie_reviews.insert("Office Space", "Deals with real issues in the workplace."); +//! movie_reviews.insert("Pulp Fiction", "Masterpiece."); +//! movie_reviews.insert("The Godfather", "Very enjoyable."); +//! movie_reviews.insert("The Blues Brothers", "Eye lyked it a lot."); +//! +//! // Get the value associated with a key. +//! // get() returns an Entry, which gives +//! // references to the key and value. +//! 
let pulp_fiction = movie_reviews.get("Pulp Fiction").unwrap(); +//! assert_eq!(*pulp_fiction.key(), "Pulp Fiction"); +//! assert_eq!(*pulp_fiction.value(), "Masterpiece."); +//! +//! // Remove a key-value pair. +//! movie_reviews.remove("The Blues Brothers"); +//! assert!(movie_reviews.get("The Blues Brothers").is_none()); +//! +//! // Iterate over the reviews. Since SkipMap +//! // is an ordered map, the iterator will yield +//! // keys in lexicographical order. +//! for entry in &movie_reviews { +//! let movie = entry.key(); +//! let review = entry.value(); +//! println!("{}: \"{}\"", movie, review); +//! } +//! ``` +//! +//! [`SkipSet`] basic usage: +//! ``` +//! use crossbeam_skiplist_fd::SkipSet; +//! +//! let books = SkipSet::new(); +//! +//! // Add some books to the set. +//! books.insert("A Dance With Dragons"); +//! books.insert("To Kill a Mockingbird"); +//! books.insert("The Odyssey"); +//! books.insert("The Great Gatsby"); +//! +//! // Check for a specific one. +//! if !books.contains("The Winds of Winter") { +//! println!("We have {} books, but The Winds of Winter ain't one.", +//! books.len()); +//! } +//! +//! // Remove a book from the set. +//! books.remove("To Kill a Mockingbird"); +//! assert!(!books.contains("To Kill a Mockingbird")); +//! +//! // Iterate over the books in the set. +//! // Values are returned in lexicographical order. +//! for entry in &books { +//! let book = entry.value(); +//! println!("{}", book); +//! } +//! ``` #![no_std] #![doc(test( @@ -48,32 +240,26 @@ ))] #![warn(missing_docs, unsafe_op_in_unsafe_fn)] +#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] +extern crate alloc; #[cfg(feature = "std")] extern crate std; -pub use crossbeam_utils::atomic; - -pub mod utils { - //! Miscellaneous utilities. - //! - //! * [`Backoff`], for exponential backoff in spin loops. - //! * [`CachePadded`], for padding and aligning a value to the length of a cache line. 
- - pub use crossbeam_utils::Backoff; - pub use crossbeam_utils::CachePadded; -} +#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] +pub mod base; -#[cfg(feature = "alloc")] +#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))] #[doc(inline)] -pub use {crossbeam_epoch as epoch, crossbeam_queue as queue}; +pub use crate::base::SkipList; #[cfg(feature = "std")] -#[doc(inline)] -pub use { - crossbeam_channel as channel, crossbeam_channel::select, crossbeam_deque as deque, - crossbeam_utils::sync, -}; +pub mod map; +#[cfg(feature = "std")] +pub mod set; #[cfg(feature = "std")] -#[cfg(not(crossbeam_loom))] -pub use crossbeam_utils::thread::{self, scope}; +#[doc(inline)] +pub use crate::{map::SkipMap, set::SkipSet}; + +pub mod equivalentor; +pub use equivalentor::{Ascend, Descend}; diff --git a/crossbeam-skiplist/src/map.rs b/src/map.rs similarity index 78% rename from crossbeam-skiplist/src/map.rs rename to src/map.rs index 9502662d3..d982bcc61 100644 --- a/crossbeam-skiplist/src/map.rs +++ b/src/map.rs @@ -5,11 +5,9 @@ use std::mem::ManuallyDrop; use std::ops::{Bound, RangeBounds}; use std::ptr; -use crate::{ - base::{self, try_pin_loop}, - equivalent::Comparable, -}; +use crate::base::{self, try_pin_loop}; use crossbeam_epoch as epoch; +use dbutils::equivalentor::{Ascend, Comparator, QueryComparator}; /// An ordered map based on a lock-free skip list. /// @@ -17,8 +15,8 @@ use crossbeam_epoch as epoch; /// concurrent access across multiple threads. /// /// [`BTreeMap`]: std::collections::BTreeMap -pub struct SkipMap { - inner: base::SkipList, +pub struct SkipMap { + inner: base::SkipList, } impl SkipMap { @@ -27,7 +25,7 @@ impl SkipMap { /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let map: SkipMap = SkipMap::new(); /// ``` @@ -36,12 +34,29 @@ impl SkipMap { inner: base::SkipList::new(epoch::default_collector().clone()), } } +} + +impl SkipMap { + /// Returns a new, empty map. 
+ /// + /// # Example + /// + /// ``` + /// use crossbeam_skiplist_fd::{SkipMap, Ascend}; + /// + /// let map: SkipMap = SkipMap::with_comparator(Ascend); + /// ``` + pub fn with_comparator(cmp: C) -> Self { + Self { + inner: base::SkipList::with_comparator(epoch::default_collector().clone(), cmp), + } + } /// Returns `true` if the map is empty. /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let map: SkipMap<&str, &str> = SkipMap::new(); /// assert!(map.is_empty()); @@ -60,7 +75,7 @@ impl SkipMap { /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let map = SkipMap::new(); /// map.insert(0, 1); @@ -77,10 +92,7 @@ impl SkipMap { } } -impl SkipMap -where - K: Ord, -{ +impl SkipMap { /// Returns the entry with the smallest key. /// /// This function returns an [`Entry`] which @@ -88,7 +100,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let numbers = SkipMap::new(); /// numbers.insert(5, "five"); @@ -96,7 +108,10 @@ where /// numbers.insert(6, "six"); /// assert_eq!(*numbers.front().unwrap().value(), "five"); /// ``` - pub fn front(&self) -> Option> { + pub fn front(&self) -> Option> + where + C: Comparator, + { let guard = &epoch::pin(); try_pin_loop(|| self.inner.front(guard)).map(Entry::new) } @@ -108,7 +123,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let numbers = SkipMap::new(); /// numbers.insert(5, "five"); @@ -116,7 +131,10 @@ where /// numbers.insert(6, "six"); /// assert_eq!(*numbers.back().unwrap().value(), "six"); /// ``` - pub fn back(&self) -> Option> { + pub fn back(&self) -> Option> + where + C: Comparator, + { let guard = &epoch::pin(); try_pin_loop(|| self.inner.back(guard)).map(Entry::new) } @@ -125,7 +143,7 @@ where /// /// # Example /// ``` - /// use 
crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let ages = SkipMap::new(); /// ages.insert("Bill Gates", 64); @@ -135,7 +153,7 @@ where /// ``` pub fn contains_key(&self, key: &Q) -> bool where - K: Comparable, + C: QueryComparator, Q: ?Sized, { let guard = &epoch::pin(); @@ -149,7 +167,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let numbers: SkipMap<&str, i32> = SkipMap::new(); /// assert!(numbers.get("six").is_none()); @@ -157,9 +175,9 @@ where /// numbers.insert("six", 6); /// assert_eq!(*numbers.get("six").unwrap().value(), 6); /// ``` - pub fn get(&self, key: &Q) -> Option> + pub fn get(&self, key: &Q) -> Option> where - K: Comparable, + C: QueryComparator, Q: ?Sized, { let guard = &epoch::pin(); @@ -175,7 +193,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// use std::ops::Bound::*; /// /// let numbers = SkipMap::new(); @@ -192,9 +210,9 @@ where /// let greater_than_thirteen = numbers.lower_bound(Excluded(&13)); /// assert!(greater_than_thirteen.is_none()); /// ``` - pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> + pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> where - K: Comparable, + C: QueryComparator, Q: ?Sized, { let guard = &epoch::pin(); @@ -210,7 +228,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// use std::ops::Bound::*; /// /// let numbers = SkipMap::new(); @@ -224,9 +242,9 @@ where /// let less_than_six = numbers.upper_bound(Excluded(&6)); /// assert!(less_than_six.is_none()); /// ``` - pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> + pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> where - K: Comparable, + C: QueryComparator, Q: ?Sized, { let guard = &epoch::pin(); @@ -240,7 +258,7 @@ where /// /// # Example /// ``` - /// 
use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let ages = SkipMap::new(); /// let gates_age = ages.get_or_insert("Bill Gates", 64); @@ -250,7 +268,10 @@ where /// let jobs_age = ages.get_or_insert("Steve Jobs", -1); /// assert_eq!(*jobs_age.value(), 65); /// ``` - pub fn get_or_insert(&self, key: K, value: V) -> Entry<'_, K, V> { + pub fn get_or_insert(&self, key: K, value: V) -> Entry<'_, K, V, C> + where + C: Comparator, + { let guard = &epoch::pin(); Entry::new(self.inner.get_or_insert(key, value, guard)) } @@ -270,7 +291,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let ages = SkipMap::new(); /// let gates_age = ages.get_or_insert_with("Bill Gates", || 64); @@ -280,9 +301,10 @@ where /// let jobs_age = ages.get_or_insert_with("Steve Jobs", || -1); /// assert_eq!(*jobs_age.value(), 65); /// ``` - pub fn get_or_insert_with(&self, key: K, value_fn: F) -> Entry<'_, K, V> + pub fn get_or_insert_with(&self, key: K, value_fn: F) -> Entry<'_, K, V, C> where F: FnOnce() -> V, + C: Comparator, { let guard = &epoch::pin(); Entry::new(self.inner.get_or_insert_with(key, value_fn, guard)) @@ -296,7 +318,7 @@ where /// /// # Examples /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let numbers = SkipMap::new(); /// numbers.insert(6, "six"); @@ -310,7 +332,7 @@ where /// println!("{} is {}", number, number_str); /// } /// ``` - pub fn iter(&self) -> Iter<'_, K, V> { + pub fn iter(&self) -> Iter<'_, K, V, C> { Iter { inner: self.inner.ref_iter(), } @@ -323,7 +345,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let numbers = SkipMap::new(); /// numbers.insert(6, "six"); @@ -337,10 +359,10 @@ where /// println!("{} is {}", number, number_str); /// } /// ``` - pub fn range(&self, range: R) -> Range<'_, Q, R, K, V> + pub fn range(&self, range: 
R) -> Range<'_, Q, R, K, V, C> where R: RangeBounds, - K: Comparable, + Q: ?Sized, { Range { @@ -349,10 +371,11 @@ where } } -impl SkipMap +impl SkipMap where - K: Ord + Send + 'static, + K: Send + 'static, V: Send + 'static, + C: Comparator + Send + 'static, { /// Inserts a `key`-`value` pair into the map and returns the new entry. /// @@ -364,14 +387,14 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let map = SkipMap::new(); /// map.insert("key", "value"); /// /// assert_eq!(*map.get("key").unwrap().value(), "value"); /// ``` - pub fn insert(&self, key: K, value: V) -> Entry<'_, K, V> { + pub fn insert(&self, key: K, value: V) -> Entry<'_, K, V, C> { let guard = &epoch::pin(); Entry::new(self.inner.insert(key, value, guard)) } @@ -387,7 +410,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let map = SkipMap::new(); /// map.insert("key", 1); @@ -398,7 +421,7 @@ where /// map.compare_insert("absent_key", 0, |_| false); /// assert_eq!(*map.get("absent_key").unwrap().value(), 0); /// ``` - pub fn compare_insert(&self, key: K, value: V, compare_fn: F) -> Entry<'_, K, V> + pub fn compare_insert(&self, key: K, value: V, compare_fn: F) -> Entry<'_, K, V, C> where F: Fn(&V) -> bool, { @@ -416,7 +439,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let map: SkipMap<&str, &str> = SkipMap::new(); /// assert!(map.remove("invalid key").is_none()); @@ -424,9 +447,9 @@ where /// map.insert("key", "value"); /// assert_eq!(*map.remove("key").unwrap().value(), "value"); /// ``` - pub fn remove(&self, key: &Q) -> Option> + pub fn remove(&self, key: &Q) -> Option> where - K: Comparable, + C: QueryComparator, Q: ?Sized, { let guard = &epoch::pin(); @@ -441,7 +464,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use 
crossbeam_skiplist_fd::SkipMap; /// /// let numbers = SkipMap::new(); /// numbers.insert(6, "six"); @@ -455,7 +478,7 @@ where /// // All entries have been removed now. /// assert!(numbers.is_empty()); /// ``` - pub fn pop_front(&self) -> Option> { + pub fn pop_front(&self) -> Option> { let guard = &epoch::pin(); self.inner.pop_front(guard).map(Entry::new) } @@ -468,7 +491,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let numbers = SkipMap::new(); /// numbers.insert(6, "six"); @@ -482,7 +505,7 @@ where /// // All entries have been removed now. /// assert!(numbers.is_empty()); /// ``` - pub fn pop_back(&self) -> Option> { + pub fn pop_back(&self) -> Option> { let guard = &epoch::pin(); self.inner.pop_back(guard).map(Entry::new) } @@ -491,7 +514,7 @@ where /// /// # Example /// ``` - /// use crossbeam_skiplist::SkipMap; + /// use crossbeam_skiplist_fd::SkipMap; /// /// let people = SkipMap::new(); /// people.insert("Bill", "Gates"); @@ -506,15 +529,15 @@ where } } -impl Default for SkipMap { +impl Default for SkipMap { fn default() -> Self { - Self::new() + Self::with_comparator(C::default()) } } -impl fmt::Debug for SkipMap +impl fmt::Debug for SkipMap where - K: Ord + fmt::Debug, + K: fmt::Debug, V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -522,7 +545,7 @@ where } } -impl IntoIterator for SkipMap { +impl IntoIterator for SkipMap { type Item = (K, V); type IntoIter = IntoIter; @@ -533,27 +556,27 @@ impl IntoIterator for SkipMap { } } -impl<'a, K, V> IntoIterator for &'a SkipMap +impl<'a, K, V, C> IntoIterator for &'a SkipMap where - K: Ord, + C: Comparator, { - type Item = Entry<'a, K, V>; - type IntoIter = Iter<'a, K, V>; + type Item = Entry<'a, K, V, C>; + type IntoIter = Iter<'a, K, V, C>; - fn into_iter(self) -> Iter<'a, K, V> { + fn into_iter(self) -> Iter<'a, K, V, C> { self.iter() } } -impl FromIterator<(K, V)> for SkipMap +impl FromIterator<(K, 
V)> for SkipMap where - K: Ord, + C: Comparator + Default, { fn from_iter(iter: I) -> Self where I: IntoIterator, { - let s = Self::new(); + let s = Self::with_comparator(C::default()); for (k, v) in iter { s.get_or_insert(k, v); } @@ -562,12 +585,12 @@ where } /// A reference-counted entry in a map. -pub struct Entry<'a, K, V> { - inner: ManuallyDrop>, +pub struct Entry<'a, K, V, C> { + inner: ManuallyDrop>, } -impl<'a, K, V> Entry<'a, K, V> { - fn new(inner: base::RefEntry<'a, K, V>) -> Self { +impl<'a, K, V, C> Entry<'a, K, V, C> { + fn new(inner: base::RefEntry<'a, K, V, C>) -> Self { Self { inner: ManuallyDrop::new(inner), } @@ -583,13 +606,18 @@ impl<'a, K, V> Entry<'a, K, V> { self.inner.value() } + /// Returns a reference to the comparator. + pub fn comparator(&self) -> &'a C { + self.inner.comparator() + } + /// Returns `true` if the entry is removed from the map. pub fn is_removed(&self) -> bool { self.inner.is_removed() } } -impl Drop for Entry<'_, K, V> { +impl Drop for Entry<'_, K, V, C> { fn drop(&mut self) { unsafe { ManuallyDrop::into_inner(ptr::read(&self.inner)).release_with_pin(epoch::pin); @@ -597,9 +625,9 @@ impl Drop for Entry<'_, K, V> { } } -impl<'a, K, V> Entry<'a, K, V> +impl<'a, K, V, C> Entry<'a, K, V, C> where - K: Ord, + C: Comparator, { /// Moves to the next entry in the map. pub fn move_next(&mut self) -> bool { @@ -614,22 +642,23 @@ where } /// Returns the next entry in the map. - pub fn next(&self) -> Option> { + pub fn next(&self) -> Option> { let guard = &epoch::pin(); self.inner.next(guard).map(Entry::new) } /// Returns the previous entry in the map. - pub fn prev(&self) -> Option> { + pub fn prev(&self) -> Option> { let guard = &epoch::pin(); self.inner.prev(guard).map(Entry::new) } } -impl Entry<'_, K, V> +impl Entry<'_, K, V, C> where - K: Ord + Send + 'static, + K: Send + 'static, V: Send + 'static, + C: Comparator + Send + 'static, { /// Removes the entry from the map. 
/// @@ -640,7 +669,7 @@ where } } -impl Clone for Entry<'_, K, V> { +impl Clone for Entry<'_, K, V, C> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -648,7 +677,7 @@ impl Clone for Entry<'_, K, V> { } } -impl fmt::Debug for Entry<'_, K, V> +impl fmt::Debug for Entry<'_, K, V, C> where K: fmt::Debug, V: fmt::Debug, @@ -681,39 +710,39 @@ impl fmt::Debug for IntoIter { } /// An iterator over the entries of a `SkipMap`. -pub struct Iter<'a, K, V> { - inner: base::RefIter<'a, K, V>, +pub struct Iter<'a, K, V, C> { + inner: base::RefIter<'a, K, V, C>, } -impl<'a, K, V> Iterator for Iter<'a, K, V> +impl<'a, K, V, C> Iterator for Iter<'a, K, V, C> where - K: Ord, + C: Comparator, { - type Item = Entry<'a, K, V>; + type Item = Entry<'a, K, V, C>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option> { let guard = &epoch::pin(); self.inner.next(guard).map(Entry::new) } } -impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> +impl<'a, K, V, C> DoubleEndedIterator for Iter<'a, K, V, C> where - K: Ord, + C: Comparator, { - fn next_back(&mut self) -> Option> { + fn next_back(&mut self) -> Option> { let guard = &epoch::pin(); self.inner.next_back(guard).map(Entry::new) } } -impl fmt::Debug for Iter<'_, K, V> { +impl fmt::Debug for Iter<'_, K, V, C> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("Iter { .. }") } } -impl Drop for Iter<'_, K, V> { +impl Drop for Iter<'_, K, V, C> { fn drop(&mut self) { let guard = &epoch::pin(); self.inner.drop_impl(guard); @@ -721,44 +750,43 @@ impl Drop for Iter<'_, K, V> { } /// An iterator over a subset of entries of a `SkipMap`. 
-pub struct Range<'a, Q, R, K, V> +pub struct Range<'a, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, { - pub(crate) inner: base::RefRange<'a, Q, R, K, V>, + pub(crate) inner: base::RefRange<'a, Q, R, K, V, C>, } -impl<'a, Q, R, K, V> Iterator for Range<'a, Q, R, K, V> +impl<'a, Q, R, K, V, C> Iterator for Range<'a, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, + C: QueryComparator, { - type Item = Entry<'a, K, V>; + type Item = Entry<'a, K, V, C>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option> { let guard = &epoch::pin(); self.inner.next(guard).map(Entry::new) } } -impl<'a, Q, R, K, V> DoubleEndedIterator for Range<'a, Q, R, K, V> +impl<'a, Q, R, K, V, C> DoubleEndedIterator for Range<'a, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, + C: QueryComparator, { - fn next_back(&mut self) -> Option> { + fn next_back(&mut self) -> Option> { let guard = &epoch::pin(); self.inner.next_back(guard).map(Entry::new) } } -impl fmt::Debug for Range<'_, Q, R, K, V> +impl fmt::Debug for Range<'_, Q, R, K, V, C> where - K: Ord + fmt::Debug + Comparable, + K: fmt::Debug, V: fmt::Debug, R: RangeBounds + fmt::Debug, Q: ?Sized, @@ -772,9 +800,8 @@ where } } -impl Drop for Range<'_, Q, R, K, V> +impl Drop for Range<'_, Q, R, K, V, C> where - K: Ord + Comparable, R: RangeBounds, Q: ?Sized, { diff --git a/crossbeam-skiplist/src/set.rs b/src/set.rs similarity index 73% rename from crossbeam-skiplist/src/set.rs rename to src/set.rs index 96ac46e96..478ebe32b 100644 --- a/crossbeam-skiplist/src/set.rs +++ b/src/set.rs @@ -4,7 +4,9 @@ use std::fmt; use std::ops::Deref; use std::ops::{Bound, RangeBounds}; -use crate::{equivalent::Comparable, map}; +use dbutils::equivalentor::{Ascend, Comparator, QueryComparator}; + +use crate::map; /// A set based on a lock-free skip list. /// @@ -12,8 +14,8 @@ use crate::{equivalent::Comparable, map}; /// concurrent access across multiple threads. 
/// /// [`BTreeSet`]: std::collections::BTreeSet -pub struct SkipSet { - inner: map::SkipMap, +pub struct SkipSet { + inner: map::SkipMap, } impl SkipSet { @@ -22,7 +24,7 @@ impl SkipSet { /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set: SkipSet = SkipSet::new(); /// ``` @@ -31,13 +33,30 @@ impl SkipSet { inner: map::SkipMap::new(), } } +} + +impl SkipSet { + /// Returns a new, empty set. + /// + /// # Example + /// + /// ``` + /// use crossbeam_skiplist_fd::{SkipSet, Descend}; + /// + /// let set: SkipSet = SkipSet::with_comparator(Descend); + /// ``` + pub fn with_comparator(cmp: C) -> Self { + Self { + inner: map::SkipMap::with_comparator(cmp), + } + } /// Returns `true` if the set is empty. /// /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// assert!(set.is_empty()); @@ -57,7 +76,7 @@ impl SkipSet { /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// assert_eq!(set.len(), 0); @@ -70,16 +89,13 @@ impl SkipSet { } } -impl SkipSet -where - T: Ord, -{ +impl SkipSet { /// Returns the entry with the smallest key. 
/// /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(1); @@ -87,7 +103,10 @@ where /// set.insert(2); /// assert_eq!(*set.front().unwrap(), 1); /// ``` - pub fn front(&self) -> Option> { + pub fn front(&self) -> Option> + where + C: Comparator, + { self.inner.front().map(Entry::new) } @@ -96,7 +115,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(1); @@ -104,7 +123,10 @@ where /// set.insert(2); /// assert_eq!(*set.back().unwrap(), 2); /// ``` - pub fn back(&self) -> Option> { + pub fn back(&self) -> Option> + where + C: Comparator, + { self.inner.back().map(Entry::new) } @@ -113,7 +135,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set: SkipSet<_> = (1..=3).collect(); /// assert!(set.contains(&1)); @@ -121,7 +143,7 @@ where /// ``` pub fn contains(&self, key: &Q) -> bool where - T: Comparable, + C: QueryComparator, Q: ?Sized, { self.inner.contains_key(key) @@ -132,15 +154,15 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set: SkipSet<_> = (1..=3).collect(); /// assert_eq!(*set.get(&3).unwrap(), 3); /// assert!(set.get(&4).is_none()); /// ``` - pub fn get(&self, key: &Q) -> Option> + pub fn get(&self, key: &Q) -> Option> where - T: Comparable, + C: QueryComparator, Q: ?Sized, { self.inner.get(key).map(Entry::new) @@ -153,7 +175,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// use std::ops::Bound::*; /// /// let set = SkipSet::new(); @@ -170,9 +192,9 @@ where /// let greater_than_thirteen = set.lower_bound(Excluded(&13)); /// assert!(greater_than_thirteen.is_none()); /// ``` - pub fn lower_bound<'a, Q>(&'a self, 
bound: Bound<&Q>) -> Option> + pub fn lower_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> where - T: Comparable, + C: QueryComparator, Q: ?Sized, { self.inner.lower_bound(bound).map(Entry::new) @@ -185,7 +207,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// use std::ops::Bound::*; /// /// let set = SkipSet::new(); @@ -199,9 +221,9 @@ where /// let less_than_six = set.upper_bound(Excluded(&6)); /// assert!(less_than_six.is_none()); /// ``` - pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> + pub fn upper_bound<'a, Q>(&'a self, bound: Bound<&Q>) -> Option> where - T: Comparable, + C: QueryComparator, Q: ?Sized, { self.inner.upper_bound(bound).map(Entry::new) @@ -212,13 +234,16 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// let entry = set.get_or_insert(2); /// assert_eq!(*entry, 2); /// ``` - pub fn get_or_insert(&self, key: T) -> Entry<'_, T> { + pub fn get_or_insert(&self, key: T) -> Entry<'_, T, C> + where + C: Comparator, + { Entry::new(self.inner.get_or_insert(key, ())) } @@ -227,7 +252,7 @@ where /// # Examples /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(6); @@ -240,7 +265,7 @@ where /// assert_eq!(*set_iter.next().unwrap(), 12); /// assert!(set_iter.next().is_none()); /// ``` - pub fn iter(&self) -> Iter<'_, T> { + pub fn iter(&self) -> Iter<'_, T, C> { Iter { inner: self.inner.iter(), } @@ -251,7 +276,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(6); @@ -263,10 +288,9 @@ where /// assert_eq!(*set_range.next().unwrap(), 7); /// assert!(set_range.next().is_none()); /// ``` - pub fn range(&self, range: R) -> Range<'_, Q, R, T> + pub fn range(&self, 
range: R) -> Range<'_, Q, R, T, C> where R: RangeBounds, - T: Comparable, Q: ?Sized, { Range { @@ -275,9 +299,10 @@ where } } -impl SkipSet +impl SkipSet where - T: Ord + Send + 'static, + T: Send + 'static, + C: Comparator + Send + 'static, { /// Inserts a `key`-`value` pair into the set and returns the new entry. /// @@ -287,13 +312,13 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(2); /// assert_eq!(*set.get(&2).unwrap(), 2); /// ``` - pub fn insert(&self, key: T) -> Entry<'_, T> { + pub fn insert(&self, key: T) -> Entry<'_, T, C> { Entry::new(self.inner.insert(key, ())) } @@ -305,17 +330,17 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(2); /// assert_eq!(*set.remove(&2).unwrap(), 2); /// assert!(set.remove(&2).is_none()); /// ``` - pub fn remove(&self, key: &Q) -> Option> + pub fn remove(&self, key: &Q) -> Option> where - T: Comparable, Q: ?Sized, + C: QueryComparator, { self.inner.remove(key).map(Entry::new) } @@ -329,7 +354,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(1); @@ -341,7 +366,10 @@ where /// // All entries have been removed now. /// assert!(set.is_empty()); /// ``` - pub fn pop_front(&self) -> Option> { + pub fn pop_front(&self) -> Option> + where + C: Comparator, + { self.inner.pop_front().map(Entry::new) } @@ -354,7 +382,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(1); @@ -366,7 +394,7 @@ where /// // All entries have been removed now. 
/// assert!(set.is_empty()); /// ``` - pub fn pop_back(&self) -> Option> { + pub fn pop_back(&self) -> Option> { self.inner.pop_back().map(Entry::new) } @@ -375,7 +403,7 @@ where /// # Example /// /// ``` - /// use crossbeam_skiplist::SkipSet; + /// use crossbeam_skiplist_fd::SkipSet; /// /// let set = SkipSet::new(); /// set.insert(1); @@ -397,7 +425,7 @@ impl Default for SkipSet { impl fmt::Debug for SkipSet where - T: Ord + fmt::Debug, + T: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("SkipSet { .. }") @@ -415,27 +443,27 @@ impl IntoIterator for SkipSet { } } -impl<'a, T> IntoIterator for &'a SkipSet +impl<'a, T, C> IntoIterator for &'a SkipSet where - T: Ord, + C: Comparator, { - type Item = Entry<'a, T>; - type IntoIter = Iter<'a, T>; + type Item = Entry<'a, T, C>; + type IntoIter = Iter<'a, T, C>; - fn into_iter(self) -> Iter<'a, T> { + fn into_iter(self) -> Iter<'a, T, C> { self.iter() } } -impl FromIterator for SkipSet +impl FromIterator for SkipSet where - T: Ord, + C: Comparator + Default, { fn from_iter(iter: I) -> Self where I: IntoIterator, { - let s = Self::new(); + let s = Self::with_comparator(C::default()); for t in iter { s.get_or_insert(t); } @@ -444,12 +472,12 @@ where } /// A reference-counted entry in a set. -pub struct Entry<'a, T> { - inner: map::Entry<'a, T, ()>, +pub struct Entry<'a, T, C> { + inner: map::Entry<'a, T, (), C>, } -impl<'a, T> Entry<'a, T> { - fn new(inner: map::Entry<'a, T, ()>) -> Self { +impl<'a, T, C> Entry<'a, T, C> { + fn new(inner: map::Entry<'a, T, (), C>) -> Self { Self { inner } } @@ -458,15 +486,20 @@ impl<'a, T> Entry<'a, T> { self.inner.key() } + /// Returns a reference to the comparator. + pub fn comparator(&self) -> &C { + self.inner.comparator() + } + /// Returns `true` if the entry is removed from the set. 
pub fn is_removed(&self) -> bool { self.inner.is_removed() } } -impl<'a, T> Entry<'a, T> +impl<'a, T, C> Entry<'a, T, C> where - T: Ord, + C: Comparator, { /// Moves to the next entry in the set. pub fn move_next(&mut self) -> bool { @@ -479,19 +512,20 @@ where } /// Returns the next entry in the set. - pub fn next(&self) -> Option> { + pub fn next(&self) -> Option> { self.inner.next().map(Entry::new) } /// Returns the previous entry in the set. - pub fn prev(&self) -> Option> { + pub fn prev(&self) -> Option> { self.inner.prev().map(Entry::new) } } -impl Entry<'_, T> +impl Entry<'_, T, C> where - T: Ord + Send + 'static, + T: Send + 'static, + C: Comparator + Send + 'static, { /// Removes the entry from the set. /// @@ -501,7 +535,7 @@ where } } -impl Clone for Entry<'_, T> { +impl Clone for Entry<'_, T, C> { fn clone(&self) -> Self { Self { inner: self.inner.clone(), @@ -509,7 +543,7 @@ impl Clone for Entry<'_, T> { } } -impl fmt::Debug for Entry<'_, T> +impl fmt::Debug for Entry<'_, T, C> where T: fmt::Debug, { @@ -520,7 +554,7 @@ where } } -impl Deref for Entry<'_, T> { +impl Deref for Entry<'_, T, C> { type Target = T; fn deref(&self) -> &Self::Target { @@ -548,73 +582,72 @@ impl fmt::Debug for IntoIter { } /// An iterator over the entries of a `SkipSet`. 
-pub struct Iter<'a, T> { - inner: map::Iter<'a, T, ()>, +pub struct Iter<'a, T, C> { + inner: map::Iter<'a, T, (), C>, } -impl<'a, T> Iterator for Iter<'a, T> +impl<'a, T, C> Iterator for Iter<'a, T, C> where - T: Ord, + C: Comparator, { - type Item = Entry<'a, T>; + type Item = Entry<'a, T, C>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option> { self.inner.next().map(Entry::new) } } -impl<'a, T> DoubleEndedIterator for Iter<'a, T> +impl<'a, T, C> DoubleEndedIterator for Iter<'a, T, C> where - T: Ord, + C: Comparator, { - fn next_back(&mut self) -> Option> { + fn next_back(&mut self) -> Option> { self.inner.next_back().map(Entry::new) } } -impl fmt::Debug for Iter<'_, T> { +impl fmt::Debug for Iter<'_, T, C> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("Iter { .. }") } } /// An iterator over a subset of entries of a `SkipSet`. -pub struct Range<'a, Q, R, T> +pub struct Range<'a, Q, R, T, C> where - T: Ord + Comparable, R: RangeBounds, Q: ?Sized, { - inner: map::Range<'a, Q, R, T, ()>, + inner: map::Range<'a, Q, R, T, (), C>, } -impl<'a, Q, R, T> Iterator for Range<'a, Q, R, T> +impl<'a, Q, R, T, C> Iterator for Range<'a, Q, R, T, C> where - T: Ord + Comparable, R: RangeBounds, Q: ?Sized, + C: QueryComparator, { - type Item = Entry<'a, T>; + type Item = Entry<'a, T, C>; - fn next(&mut self) -> Option> { + fn next(&mut self) -> Option> { self.inner.next().map(Entry::new) } } -impl<'a, Q, R, T> DoubleEndedIterator for Range<'a, Q, R, T> +impl<'a, Q, R, T, C> DoubleEndedIterator for Range<'a, Q, R, T, C> where - T: Ord + Comparable, R: RangeBounds, Q: ?Sized, + C: QueryComparator, { - fn next_back(&mut self) -> Option> { + fn next_back(&mut self) -> Option> { self.inner.next_back().map(Entry::new) } } -impl fmt::Debug for Range<'_, Q, R, T> +impl fmt::Debug for Range<'_, Q, R, T, C> where - T: Ord + Comparable + fmt::Debug, + T: fmt::Debug, R: RangeBounds + fmt::Debug, Q: ?Sized, { diff --git a/crossbeam-skiplist/tests/base.rs 
b/tests/base.rs similarity index 98% rename from crossbeam-skiplist/tests/base.rs rename to tests/base.rs index d12b7fde7..3d0176610 100644 --- a/crossbeam-skiplist/tests/base.rs +++ b/tests/base.rs @@ -4,18 +4,18 @@ use std::ops::Bound; use std::sync::atomic::{AtomicUsize, Ordering}; use crossbeam_epoch as epoch; -use crossbeam_skiplist::{base, SkipList}; +use crossbeam_skiplist_fd::{base, SkipList}; -fn ref_entry<'a, K, V>(e: impl Into>>) -> Entry<'a, K, V> { +fn ref_entry<'a, K, V, C>(e: impl Into>>) -> Entry<'a, K, V, C> { Entry(e.into()) } -struct Entry<'a, K, V>(Option>); -impl Entry<'_, K, V> { +struct Entry<'a, K, V, C>(Option>); +impl Entry<'_, K, V, C> { fn value(&self) -> &V { self.0.as_ref().unwrap().value() } } -impl Drop for Entry<'_, K, V> { +impl Drop for Entry<'_, K, V, C> { fn drop(&mut self) { if let Some(e) = self.0.take() { e.release_with_pin(epoch::pin) @@ -903,7 +903,7 @@ fn drops() { #[test] fn comparable_get() { - use crossbeam_skiplist::equivalent::{Comparable, Equivalent}; + use crossbeam_skiplist_fd::equivalentor::{Comparable, Equivalent}; #[derive(PartialEq, Eq, PartialOrd, Ord)] struct Foo { diff --git a/crossbeam-skiplist/tests/map.rs b/tests/map.rs similarity index 99% rename from crossbeam-skiplist/tests/map.rs rename to tests/map.rs index 3b734f110..cef0afd3f 100644 --- a/crossbeam-skiplist/tests/map.rs +++ b/tests/map.rs @@ -2,7 +2,7 @@ use std::iter; use std::ops::Bound; use std::sync::{Arc, Barrier}; -use crossbeam_skiplist::SkipMap; +use crossbeam_skiplist_fd::SkipMap; use crossbeam_utils::thread; #[test] diff --git a/crossbeam-skiplist/tests/set.rs b/tests/set.rs similarity index 99% rename from crossbeam-skiplist/tests/set.rs rename to tests/set.rs index 65003798b..34b9d1a8e 100644 --- a/crossbeam-skiplist/tests/set.rs +++ b/tests/set.rs @@ -1,4 +1,4 @@ -use crossbeam_skiplist::SkipSet; +use crossbeam_skiplist_fd::SkipSet; use crossbeam_utils::thread; use std::{ iter, diff --git a/tests/subcrates.rs b/tests/subcrates.rs 
deleted file mode 100644 index 21b99fb0e..000000000 --- a/tests/subcrates.rs +++ /dev/null @@ -1,47 +0,0 @@ -//! Makes sure subcrates are properly re-exported. - -use crossbeam::select; - -#[test] -fn channel() { - let (s, r) = crossbeam::channel::bounded(1); - - select! { - send(s, 0) -> res => res.unwrap(), - recv(r) -> res => assert!(res.is_ok()), - } -} - -#[test] -fn deque() { - let w = crossbeam::deque::Worker::new_fifo(); - w.push(1); - let _ = w.pop(); -} - -#[test] -fn epoch() { - crossbeam::epoch::pin(); -} - -#[test] -fn queue() { - let a = crossbeam::queue::ArrayQueue::new(10); - let _ = a.push(1); - let _ = a.pop(); -} - -#[test] -fn utils() { - crossbeam::utils::CachePadded::new(7); - - crossbeam::scope(|scope| { - scope.spawn(|_| ()); - }) - .unwrap(); - - crossbeam::thread::scope(|scope| { - scope.spawn(|_| ()); - }) - .unwrap(); -} diff --git a/tools/publish.sh b/tools/publish.sh deleted file mode 100755 index 814b3beb5..000000000 --- a/tools/publish.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/bash -set -euo pipefail -IFS=$'\n\t' -cd "$(dirname "$0")"/.. - -# Publish a new release. -# -# USAGE: -# ./tools/publish.sh - -bail() { - echo >&2 "error: $*" - exit 1 -} - -crate="${1:?}" -version="${2:?}" -version="${version#v}" -tag="${crate}-${version}" -if [[ ! "${version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[0-9A-Za-z\.-]+)?(\+[0-9A-Za-z\.-]+)?$ ]]; then - bail "invalid version format '${version}'" -fi -if [[ $# -gt 2 ]]; then - bail "invalid argument '$3'" -fi - -# Make sure there is no uncommitted change. -git diff --exit-code -git diff --exit-code --staged - -# Make sure the same release has not been created in the past. -if gh release view "${tag}" &>/dev/null; then - bail "tag '${tag}' has already been created and pushed" -fi - -if ! git branch | grep -q '\* master'; then - bail "current branch is not 'master'" -fi - -git tag "${tag}" - -( - if [[ "${crate}" != "crossbeam" ]]; then - cd "${crate}" - fi - cargo +stable publish -) - -git push origin --tags