aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeff Vander Stoep <jeffv@google.com>2021-04-22 12:22:19 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2021-04-22 12:22:19 +0000
commit74012c187da02987ad4ab8f712cd5e4d6095f82b (patch)
treec39809fba43deb9ee717b07b2afaeb71ef36ca59
parent19afdb1c562c4b78ca760537bacc589608fde0ab (diff)
parent0584d80c843a5d157eac7d26444117011df46350 (diff)
downloadtokio-74012c187da02987ad4ab8f712cd5e4d6095f82b.tar.gz
Update to 1.50.0 am: 28f5548b7b am: 0a0b41a09b am: 0584d80c84
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/tokio/+/1682012 Change-Id: Iaf32292e11352e344fc82afa92d98b8c68830979
-rw-r--r--.cargo_vcs_info.json2
-rw-r--r--Android.bp842
-rw-r--r--CHANGELOG.md130
-rw-r--r--Cargo.toml8
-rw-r--r--Cargo.toml.orig9
-rw-r--r--LICENSE2
-rw-r--r--METADATA8
-rw-r--r--TEST_MAPPING202
-rw-r--r--patches/Android.bp.patch159
-rw-r--r--src/coop.rs81
-rw-r--r--src/io/async_fd.rs26
-rw-r--r--src/io/async_read.rs6
-rw-r--r--src/io/driver/interest.rs2
-rw-r--r--src/io/driver/registration.rs7
-rw-r--r--src/io/driver/scheduled_io.rs4
-rw-r--r--src/io/mod.rs2
-rw-r--r--src/io/poll_evented.rs5
-rw-r--r--src/io/util/async_buf_read_ext.rs2
-rw-r--r--src/io/util/async_read_ext.rs2
-rw-r--r--src/io/util/async_seek_ext.rs11
-rw-r--r--src/io/util/async_write_ext.rs45
-rw-r--r--src/io/util/copy.rs128
-rw-r--r--src/io/util/copy_bidirectional.rs119
-rw-r--r--src/io/util/mod.rs6
-rw-r--r--src/io/util/write_vectored.rs47
-rw-r--r--src/lib.rs5
-rw-r--r--src/macros/cfg.rs18
-rw-r--r--src/macros/pin.rs2
-rw-r--r--src/macros/select.rs121
-rw-r--r--src/net/tcp/listener.rs43
-rw-r--r--src/net/tcp/socket.rs11
-rw-r--r--src/net/tcp/stream.rs11
-rw-r--r--src/net/udp.rs52
-rw-r--r--src/net/unix/datagram/socket.rs32
-rw-r--r--src/net/unix/listener.rs31
-rw-r--r--src/net/unix/stream.rs47
-rw-r--r--src/park/mod.rs3
-rw-r--r--src/process/mod.rs4
-rw-r--r--src/process/unix/driver.rs60
-rw-r--r--src/process/unix/reap.rs17
-rw-r--r--src/runtime/basic_scheduler.rs122
-rw-r--r--src/runtime/builder.rs30
-rw-r--r--src/runtime/context.rs10
-rw-r--r--src/runtime/handle.rs88
-rw-r--r--src/runtime/mod.rs6
-rw-r--r--src/runtime/queue.rs2
-rw-r--r--src/runtime/tests/loom_basic_scheduler.rs82
-rw-r--r--src/runtime/tests/mod.rs1
-rw-r--r--src/runtime/thread_pool/worker.rs2
-rw-r--r--src/signal/mod.rs40
-rw-r--r--src/signal/registry.rs131
-rw-r--r--src/signal/reusable_box.rs227
-rw-r--r--src/signal/unix.rs56
-rw-r--r--src/signal/windows.rs24
-rw-r--r--src/sync/barrier.rs24
-rw-r--r--src/sync/batch_semaphore.rs1
-rw-r--r--src/sync/mod.rs29
-rw-r--r--src/sync/mpsc/block.rs4
-rw-r--r--src/sync/mpsc/bounded.rs74
-rw-r--r--src/sync/mpsc/chan.rs29
-rw-r--r--src/sync/mpsc/error.rs33
-rw-r--r--src/sync/mpsc/unbounded.rs16
-rw-r--r--src/sync/mutex.rs43
-rw-r--r--src/sync/notify.rs34
-rw-r--r--src/sync/once_cell.rs400
-rw-r--r--src/sync/oneshot.rs115
-rw-r--r--src/sync/rwlock.rs601
-rw-r--r--src/sync/rwlock/owned_read_guard.rs149
-rw-r--r--src/sync/rwlock/owned_write_guard.rs234
-rw-r--r--src/sync/rwlock/owned_write_guard_mapped.rs171
-rw-r--r--src/sync/rwlock/read_guard.rs156
-rw-r--r--src/sync/rwlock/write_guard.rs240
-rw-r--r--src/sync/rwlock/write_guard_mapped.rs176
-rw-r--r--src/sync/semaphore.rs60
-rw-r--r--src/sync/tests/loom_rwlock.rs10
-rw-r--r--src/sync/watch.rs33
-rw-r--r--src/task/local.rs2
-rw-r--r--src/task/mod.rs58
-rw-r--r--src/task/task_local.rs47
-rw-r--r--src/task/unconstrained.rs43
-rw-r--r--src/time/driver/entry.rs4
-rw-r--r--src/time/driver/handle.rs23
-rw-r--r--src/time/driver/mod.rs71
-rw-r--r--src/time/driver/sleep.rs17
-rw-r--r--src/time/driver/tests/mod.rs14
-rw-r--r--src/time/instant.rs8
-rw-r--r--src/time/mod.rs20
-rw-r--r--src/time/timeout.rs6
-rw-r--r--src/util/error.rs6
-rw-r--r--src/util/linked_list.rs153
-rw-r--r--src/util/wake.rs2
-rw-r--r--tests/async_send_sync.rs25
-rw-r--r--tests/io_copy_bidirectional.rs128
-rw-r--r--tests/macros_select.rs59
-rw-r--r--tests/rt_common.rs26
-rw-r--r--tests/rt_handle_block_on.rs511
-rw-r--r--tests/sync_notify.rs17
-rw-r--r--tests/sync_once_cell.rs268
-rw-r--r--tests/sync_oneshot.rs22
-rw-r--r--tests/sync_rwlock.rs2
-rw-r--r--tests/sync_semaphore_owned.rs31
-rw-r--r--tests/task_abort.rs67
-rw-r--r--tests/time_timeout.rs27
103 files changed, 5432 insertions, 1990 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 9e33e70..b75e29b 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,5 @@
{
"git": {
- "sha1": "572a897d43d5e4942f26b7a67bed862d642679e4"
+ "sha1": "a5ee2f0d3d78daa01e2c6c12d22b82474dc5c32a"
}
}
diff --git a/Android.bp b/Android.bp
index 037a710..b654899 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,8 +1,6 @@
-// This file is generated by cargo2android.py --device --run --features io-util,macros,rt-multi-thread,sync,net,fs,time --tests --patch=patches/Android.bp.patch.
+// This file is generated by cargo2android.py --device --run --features io-util,macros,rt-multi-thread,sync,net,fs,time --tests.
// Do not modify this file as changes will be overridden on upgrade.
-
-
package {
default_applicable_licenses: ["external_rust_crates_tokio_license"],
}
@@ -42,9 +40,7 @@ rust_library {
"time",
"tokio-macros",
],
- flags: [
- "--cfg tokio_track_caller",
- ],
+ cfgs: ["tokio_track_caller"],
rustlibs: [
"libbytes",
"liblibc",
@@ -59,49 +55,6 @@ rust_library {
rust_defaults {
name: "tokio_defaults",
crate_name: "tokio",
- srcs: ["src/lib.rs"],
- test_suites: ["general-tests"],
- auto_gen_config: true,
- edition: "2018",
- features: [
- "bytes",
- "fs",
- "io-util",
- "libc",
- "macros",
- "memchr",
- "mio",
- "net",
- "num_cpus",
- "rt",
- "rt-multi-thread",
- "sync",
- "time",
- "tokio-macros",
- ],
- flags: [
- "--cfg tokio_track_caller",
- ],
- rustlibs: [
- "libasync_stream",
- "libbytes",
- "libfutures",
- "liblibc",
- "libmemchr",
- "libmio",
- "libnix",
- "libnum_cpus",
- "libpin_project_lite",
- "librand",
- "libtokio_stream",
- "libtokio_test",
- ],
- proc_macros: ["libtokio_macros"],
-}
-
-rust_defaults {
- name: "tokio_defaults_tokio",
- crate_name: "tokio",
test_suites: ["general-tests"],
auto_gen_config: true,
edition: "2018",
@@ -121,9 +74,7 @@ rust_defaults {
"time",
"tokio-macros",
],
- flags: [
- "--cfg tokio_track_caller",
- ],
+ cfgs: ["tokio_track_caller"],
rustlibs: [
"libasync_stream",
"libbytes",
@@ -143,23 +94,8 @@ rust_defaults {
}
rust_test_host {
- name: "tokio_host_test_tests_async_send_sync",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/async_send_sync.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_async_send_sync",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/async_send_sync.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_buffered",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/buffered.rs"],
test_options: {
unit_test: true,
@@ -168,58 +104,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_buffered",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/buffered.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_fs",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_fs",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_fs_copy",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs_copy.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_fs_copy",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs_copy.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_fs_dir",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs_dir.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_fs_dir",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs_dir.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_fs_file",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/fs_file.rs"],
test_options: {
unit_test: true,
@@ -228,28 +119,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_fs_file",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/fs_file.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_fs_file_mocked",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs_file_mocked.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_fs_file_mocked",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/fs_file_mocked.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_fs_link",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/fs_link.rs"],
test_options: {
unit_test: true,
@@ -258,28 +134,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_fs_link",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/fs_link.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_io_async_fd",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_async_fd.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_async_fd",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_async_fd.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_io_async_read",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_async_read.rs"],
test_options: {
unit_test: true,
@@ -288,73 +149,28 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_async_read",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_async_read.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_io_chain",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_chain.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_chain",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_chain.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_io_copy",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_copy.rs"],
+ name: "tokio_host_test_tests_io_copy_bidirectional",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/io_copy_bidirectional.rs"],
test_options: {
unit_test: true,
},
}
rust_test {
- name: "tokio_device_test_tests_io_copy",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_copy.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_io_driver",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_driver.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_driver",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_driver.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_io_driver_drop",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_driver_drop.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_driver_drop",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_driver_drop.rs"],
+ name: "tokio_device_test_tests_io_copy_bidirectional",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/io_copy_bidirectional.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_lines",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_lines.rs"],
test_options: {
unit_test: true,
@@ -363,13 +179,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_lines",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_lines.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_mem_stream",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_mem_stream.rs"],
test_options: {
unit_test: true,
@@ -378,13 +194,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_mem_stream",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_mem_stream.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_read",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_read.rs"],
test_options: {
unit_test: true,
@@ -393,13 +209,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_read",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_read.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_read_buf",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_read_buf.rs"],
test_options: {
unit_test: true,
@@ -408,43 +224,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_read_buf",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_read_buf.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_io_read_exact",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_exact.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_read_exact",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_exact.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_io_read_line",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_line.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_read_line",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_line.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_io_read_to_end",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_read_to_end.rs"],
test_options: {
unit_test: true,
@@ -453,58 +239,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_read_to_end",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_read_to_end.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_io_read_to_string",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_to_string.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_read_to_string",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_to_string.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_io_read_until",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_until.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_read_until",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_read_until.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_io_split",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_split.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_io_split",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/io_split.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_io_take",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_take.rs"],
test_options: {
unit_test: true,
@@ -513,13 +254,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_take",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_take.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_write",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write.rs"],
test_options: {
unit_test: true,
@@ -528,13 +269,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_write",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_write_all",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write_all.rs"],
test_options: {
unit_test: true,
@@ -543,13 +284,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_write_all",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write_all.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_write_buf",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write_buf.rs"],
test_options: {
unit_test: true,
@@ -558,13 +299,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_write_buf",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write_buf.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_io_write_int",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write_int.rs"],
test_options: {
unit_test: true,
@@ -573,13 +314,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_io_write_int",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/io_write_int.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_macros_join",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/macros_join.rs"],
test_options: {
unit_test: true,
@@ -588,103 +329,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_macros_join",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/macros_join.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_macros_pin",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_pin.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_macros_pin",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_pin.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_macros_select",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_select.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_macros_select",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_select.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_macros_test",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_test.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_macros_test",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_test.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_macros_try_join",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_try_join.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_macros_try_join",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/macros_try_join.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_net_bind_resource",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/net_bind_resource.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_net_bind_resource",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/net_bind_resource.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_net_lookup_host",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/net_lookup_host.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_net_lookup_host",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/net_lookup_host.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_no_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/no_rt.rs"],
test_options: {
unit_test: true,
@@ -693,13 +344,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_no_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/no_rt.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_process_issue_2174",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/process_issue_2174.rs"],
test_options: {
unit_test: true,
@@ -708,13 +359,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_process_issue_2174",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/process_issue_2174.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_process_issue_42",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/process_issue_42.rs"],
test_options: {
unit_test: true,
@@ -723,28 +374,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_process_issue_42",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/process_issue_42.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_process_kill_on_drop",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/process_kill_on_drop.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_process_kill_on_drop",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/process_kill_on_drop.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_process_smoke",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/process_smoke.rs"],
test_options: {
unit_test: true,
@@ -753,13 +389,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_process_smoke",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/process_smoke.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_rt_basic",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/rt_basic.rs"],
test_options: {
unit_test: true,
@@ -768,28 +404,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_rt_basic",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/rt_basic.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_rt_common",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/rt_common.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_rt_common",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/rt_common.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_rt_threaded",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/rt_threaded.rs"],
test_options: {
unit_test: true,
@@ -798,13 +419,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_rt_threaded",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/rt_threaded.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_signal_ctrl_c",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_ctrl_c.rs"],
test_options: {
unit_test: true,
@@ -813,28 +434,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_signal_ctrl_c",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_ctrl_c.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_signal_drop_recv",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/signal_drop_recv.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_signal_drop_recv",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/signal_drop_recv.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_signal_drop_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_drop_rt.rs"],
test_options: {
unit_test: true,
@@ -843,13 +449,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_signal_drop_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_drop_rt.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_signal_drop_signal",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_drop_signal.rs"],
test_options: {
unit_test: true,
@@ -858,13 +464,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_signal_drop_signal",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_drop_signal.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_signal_multi_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_multi_rt.rs"],
test_options: {
unit_test: true,
@@ -873,13 +479,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_signal_multi_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_multi_rt.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_signal_no_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_no_rt.rs"],
test_options: {
unit_test: true,
@@ -888,13 +494,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_signal_no_rt",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_no_rt.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_signal_notify_both",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_notify_both.rs"],
test_options: {
unit_test: true,
@@ -903,13 +509,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_signal_notify_both",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_notify_both.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_signal_twice",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_twice.rs"],
test_options: {
unit_test: true,
@@ -918,28 +524,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_signal_twice",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/signal_twice.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_signal_usr1",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/signal_usr1.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_signal_usr1",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/signal_usr1.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_sync_barrier",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_barrier.rs"],
test_options: {
unit_test: true,
@@ -948,13 +539,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_sync_barrier",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_barrier.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_sync_broadcast",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_broadcast.rs"],
test_options: {
unit_test: true,
@@ -963,13 +554,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_sync_broadcast",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_broadcast.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_sync_errors",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_errors.rs"],
test_options: {
unit_test: true,
@@ -978,13 +569,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_sync_errors",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_errors.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_sync_mpsc",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_mpsc.rs"],
test_options: {
unit_test: true,
@@ -993,28 +584,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_sync_mpsc",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_mpsc.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_sync_mutex",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_mutex.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_sync_mutex",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_mutex.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_sync_mutex_owned",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_mutex_owned.rs"],
test_options: {
unit_test: true,
@@ -1023,43 +599,28 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_sync_mutex_owned",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_mutex_owned.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_sync_notify",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_notify.rs"],
+ name: "tokio_host_test_tests_sync_once_cell",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/sync_once_cell.rs"],
test_options: {
unit_test: true,
},
}
rust_test {
- name: "tokio_device_test_tests_sync_notify",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_notify.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_sync_oneshot",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_oneshot.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_sync_oneshot",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_oneshot.rs"],
+ name: "tokio_device_test_tests_sync_once_cell",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/sync_once_cell.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_sync_rwlock",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_rwlock.rs"],
test_options: {
unit_test: true,
@@ -1068,43 +629,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_sync_rwlock",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_rwlock.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_sync_semaphore",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_semaphore.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_sync_semaphore",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_semaphore.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_sync_semaphore_owned",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_semaphore_owned.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_sync_semaphore_owned",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/sync_semaphore_owned.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_sync_watch",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_watch.rs"],
test_options: {
unit_test: true,
@@ -1113,43 +644,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_sync_watch",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/sync_watch.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_task_abort",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/task_abort.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_task_abort",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/task_abort.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_task_blocking",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/task_blocking.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_task_blocking",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/task_blocking.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_task_local",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/task_local.rs"],
test_options: {
unit_test: true,
@@ -1158,13 +659,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_task_local",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/task_local.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_task_local_set",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/task_local_set.rs"],
test_options: {
unit_test: true,
@@ -1173,13 +674,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_task_local_set",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/task_local_set.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_tcp_accept",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_accept.rs"],
test_options: {
unit_test: true,
@@ -1188,28 +689,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_tcp_accept",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_accept.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_tcp_connect",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_connect.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_tcp_connect",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_connect.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_tcp_echo",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_echo.rs"],
test_options: {
unit_test: true,
@@ -1218,28 +704,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_tcp_echo",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_echo.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_tcp_into_split",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_into_split.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_tcp_into_split",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_into_split.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_tcp_into_std",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_into_std.rs"],
test_options: {
unit_test: true,
@@ -1248,28 +719,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_tcp_into_std",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_into_std.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_tcp_peek",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_peek.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_tcp_peek",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_peek.rs"],
-}
-
-rust_test_host {
name: "tokio_host_test_tests_tcp_shutdown",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_shutdown.rs"],
test_options: {
unit_test: true,
@@ -1278,118 +734,58 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_tcp_shutdown",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/tcp_shutdown.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_tcp_socket",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_socket.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_tcp_socket",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_socket.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_tcp_split",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_split.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_tcp_split",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_split.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_tcp_stream",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_stream.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_tcp_stream",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/tcp_stream.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_test_clock",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/test_clock.rs"],
- test_options: {
- unit_test: true,
- },
-}
-
-rust_test {
- name: "tokio_device_test_tests_test_clock",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/test_clock.rs"],
-}
-
-rust_test_host {
- name: "tokio_host_test_tests_udp",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/udp.rs"],
+ name: "tokio_host_test_tests_time_interval",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/time_interval.rs"],
test_options: {
unit_test: true,
},
}
rust_test {
- name: "tokio_device_test_tests_udp",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/udp.rs"],
+ name: "tokio_device_test_tests_time_interval",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/time_interval.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_uds_cred",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/uds_cred.rs"],
+ name: "tokio_host_test_tests_time_rt",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/time_rt.rs"],
test_options: {
unit_test: true,
},
}
rust_test {
- name: "tokio_device_test_tests_uds_cred",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/uds_cred.rs"],
+ name: "tokio_device_test_tests_time_rt",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/time_rt.rs"],
}
rust_test_host {
- name: "tokio_host_test_tests_uds_datagram",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/uds_datagram.rs"],
+ name: "tokio_host_test_tests_time_timeout",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/time_timeout.rs"],
test_options: {
unit_test: true,
},
}
rust_test {
- name: "tokio_device_test_tests_uds_datagram",
- defaults: ["tokio_defaults_tokio"],
- srcs: ["tests/uds_datagram.rs"],
+ name: "tokio_device_test_tests_time_timeout",
+ defaults: ["tokio_defaults"],
+ srcs: ["tests/time_timeout.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_uds_split",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/uds_split.rs"],
test_options: {
unit_test: true,
@@ -1398,13 +794,13 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_uds_split",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/uds_split.rs"],
}
rust_test_host {
name: "tokio_host_test_tests_uds_stream",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/uds_stream.rs"],
test_options: {
unit_test: true,
@@ -1413,6 +809,6 @@ rust_test_host {
rust_test {
name: "tokio_device_test_tests_uds_stream",
- defaults: ["tokio_defaults_tokio"],
+ defaults: ["tokio_defaults"],
srcs: ["tests/uds_stream.rs"],
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cc1a305..0808920 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,133 @@
+# 1.5.0 (April 12, 2021)
+
+### Added
+
+- io: add `AsyncSeekExt::stream_position` ([#3650])
+- io: add `AsyncWriteExt::write_vectored` ([#3678])
+- io: add a `copy_bidirectional` utility ([#3572])
+- net: implement `IntoRawFd` for `TcpSocket` ([#3684])
+- sync: add `OnceCell` ([#3591])
+- sync: add `OwnedRwLockReadGuard` and `OwnedRwLockWriteGuard` ([#3340])
+- sync: add `Semaphore::is_closed` ([#3673])
+- sync: add `mpsc::Sender::capacity` ([#3690])
+- sync: allow configuring `RwLock` max reads ([#3644])
+- task: add `sync_scope` for `LocalKey` ([#3612])
+
+### Fixed
+
+- chore: try to avoid `noalias` attributes on intrusive linked list ([#3654])
+- rt: fix panic in `JoinHandle::abort()` when called from other threads ([#3672])
+- sync: don't panic in `oneshot::try_recv` ([#3674])
+- sync: fix notifications getting dropped on receiver drop ([#3652])
+
+### Documented
+
+- io: clarify requirements of `AsyncFd` ([#3635])
+- runtime: fix unclear docs for `{Handle,Runtime}::block_on` ([#3628])
+- sync: document that `Semaphore` is fair ([#3693])
+- sync: improve doc on blocking mutex ([#3645])
+
+[#3340]: https://github.com/tokio-rs/tokio/pull/3340
+[#3572]: https://github.com/tokio-rs/tokio/pull/3572
+[#3591]: https://github.com/tokio-rs/tokio/pull/3591
+[#3612]: https://github.com/tokio-rs/tokio/pull/3612
+[#3628]: https://github.com/tokio-rs/tokio/pull/3628
+[#3635]: https://github.com/tokio-rs/tokio/pull/3635
+[#3644]: https://github.com/tokio-rs/tokio/pull/3644
+[#3645]: https://github.com/tokio-rs/tokio/pull/3645
+[#3650]: https://github.com/tokio-rs/tokio/pull/3650
+[#3652]: https://github.com/tokio-rs/tokio/pull/3652
+[#3654]: https://github.com/tokio-rs/tokio/pull/3654
+[#3672]: https://github.com/tokio-rs/tokio/pull/3672
+[#3673]: https://github.com/tokio-rs/tokio/pull/3673
+[#3674]: https://github.com/tokio-rs/tokio/pull/3674
+[#3678]: https://github.com/tokio-rs/tokio/pull/3678
+[#3684]: https://github.com/tokio-rs/tokio/pull/3684
+[#3690]: https://github.com/tokio-rs/tokio/pull/3690
+[#3693]: https://github.com/tokio-rs/tokio/pull/3693
+
+# 1.4.0 (March 20, 2021)
+
+### Added
+
+- macros: introduce biased argument for `select!` ([#3603])
+- runtime: add `Handle::block_on` ([#3569])
+
+### Fixed
+
+- runtime: avoid unnecessary polling of `block_on` future ([#3582])
+- runtime: fix memory leak/growth when creating many runtimes ([#3564])
+- runtime: mark `EnterGuard` with `must_use` ([#3609])
+
+### Documented
+
+- chore: mention fix for building docs in contributing guide ([#3618])
+- doc: add link to `PollSender` ([#3613])
+- doc: alias sleep to delay ([#3604])
+- sync: improve `Mutex` FIFO explanation ([#3615])
+- timer: fix double newline in module docs ([#3617])
+
+[#3564]: https://github.com/tokio-rs/tokio/pull/3564
+[#3613]: https://github.com/tokio-rs/tokio/pull/3613
+[#3618]: https://github.com/tokio-rs/tokio/pull/3618
+[#3617]: https://github.com/tokio-rs/tokio/pull/3617
+[#3582]: https://github.com/tokio-rs/tokio/pull/3582
+[#3615]: https://github.com/tokio-rs/tokio/pull/3615
+[#3603]: https://github.com/tokio-rs/tokio/pull/3603
+[#3609]: https://github.com/tokio-rs/tokio/pull/3609
+[#3604]: https://github.com/tokio-rs/tokio/pull/3604
+[#3569]: https://github.com/tokio-rs/tokio/pull/3569
+
+# 1.3.0 (March 9, 2021)
+
+### Added
+
+- coop: expose an `unconstrained()` opt-out ([#3547])
+- net: add `into_std` for net types without it ([#3509])
+- sync: add `same_channel` method to `mpsc::Sender` ([#3532])
+- sync: add `{try_,}acquire_many_owned` to `Semaphore` ([#3535])
+- sync: add back `RwLockWriteGuard::map` and `RwLockWriteGuard::try_map` ([#3348])
+
+### Fixed
+
+- sync: allow `oneshot::Receiver::close` after successful `try_recv` ([#3552])
+- time: do not panic on `timeout(Duration::MAX)` ([#3551])
+
+### Documented
+
+- doc: doc aliases for pre-1.0 function names ([#3523])
+- io: fix typos ([#3541])
+- io: note the EOF behaviour of `read_until` ([#3536])
+- io: update `AsyncRead::poll_read` doc ([#3557])
+- net: update `UdpSocket` splitting doc ([#3517])
+- runtime: add link to `LocalSet` on `new_current_thread` ([#3508])
+- runtime: update documentation of thread limits ([#3527])
+- sync: do not recommend `join_all` for `Barrier` ([#3514])
+- sync: documentation for `oneshot` ([#3592])
+- sync: rename `notify` to `notify_one` ([#3526])
+- time: fix typo in `Sleep` doc ([#3515])
+- time: sync `interval.rs` and `time/mod.rs` docs ([#3533])
+
+[#3348]: https://github.com/tokio-rs/tokio/pull/3348
+[#3508]: https://github.com/tokio-rs/tokio/pull/3508
+[#3509]: https://github.com/tokio-rs/tokio/pull/3509
+[#3514]: https://github.com/tokio-rs/tokio/pull/3514
+[#3515]: https://github.com/tokio-rs/tokio/pull/3515
+[#3517]: https://github.com/tokio-rs/tokio/pull/3517
+[#3523]: https://github.com/tokio-rs/tokio/pull/3523
+[#3526]: https://github.com/tokio-rs/tokio/pull/3526
+[#3527]: https://github.com/tokio-rs/tokio/pull/3527
+[#3532]: https://github.com/tokio-rs/tokio/pull/3532
+[#3533]: https://github.com/tokio-rs/tokio/pull/3533
+[#3535]: https://github.com/tokio-rs/tokio/pull/3535
+[#3536]: https://github.com/tokio-rs/tokio/pull/3536
+[#3541]: https://github.com/tokio-rs/tokio/pull/3541
+[#3547]: https://github.com/tokio-rs/tokio/pull/3547
+[#3551]: https://github.com/tokio-rs/tokio/pull/3551
+[#3552]: https://github.com/tokio-rs/tokio/pull/3552
+[#3557]: https://github.com/tokio-rs/tokio/pull/3557
+[#3592]: https://github.com/tokio-rs/tokio/pull/3592
+
# 1.2.0 (February 5, 2021)
### Added
diff --git a/Cargo.toml b/Cargo.toml
index 9f23031..05ab658 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,11 +13,11 @@
[package]
edition = "2018"
name = "tokio"
-version = "1.2.0"
+version = "1.5.0"
authors = ["Tokio Contributors <team@tokio.rs>"]
description = "An event-driven, non-blocking I/O platform for writing asynchronous I/O\nbacked applications.\n"
homepage = "https://tokio.rs"
-documentation = "https://docs.rs/tokio/1.2.0/tokio/"
+documentation = "https://docs.rs/tokio/1.5.0/tokio/"
readme = "README.md"
keywords = ["io", "async", "non-blocking", "futures"]
categories = ["asynchronous", "network-programming"]
@@ -67,7 +67,7 @@ version = "0.3.0"
features = ["async-await"]
[dev-dependencies.proptest]
-version = "0.10.0"
+version = "1"
[dev-dependencies.rand]
version = "0.8.0"
@@ -99,7 +99,7 @@ sync = []
test-util = []
time = []
[target."cfg(loom)".dev-dependencies.loom]
-version = "0.4"
+version = "0.5"
features = ["futures", "checkpoint"]
[target."cfg(tokio_unstable)".dependencies.tracing]
version = "0.1.21"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 33c371c..5e53c3f 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -2,18 +2,17 @@
name = "tokio"
# When releasing to crates.io:
# - Remove path dependencies
-# - Update html_root_url.
# - Update doc url
# - Cargo.toml
# - README.md
# - Update CHANGELOG.md.
# - Create "v1.0.x" git tag.
-version = "1.2.0"
+version = "1.5.0"
edition = "2018"
authors = ["Tokio Contributors <team@tokio.rs>"]
license = "MIT"
readme = "README.md"
-documentation = "https://docs.rs/tokio/1.2.0/tokio/"
+documentation = "https://docs.rs/tokio/1.5.0/tokio/"
repository = "https://github.com/tokio-rs/tokio"
homepage = "https://tokio.rs"
description = """
@@ -120,13 +119,13 @@ optional = true
tokio-test = { version = "0.4.0", path = "../tokio-test" }
tokio-stream = { version = "0.1", path = "../tokio-stream" }
futures = { version = "0.3.0", features = ["async-await"] }
-proptest = "0.10.0"
+proptest = "1"
rand = "0.8.0"
tempfile = "3.1.0"
async-stream = "0.3"
[target.'cfg(loom)'.dev-dependencies]
-loom = { version = "0.4", features = ["futures", "checkpoint"] }
+loom = { version = "0.5", features = ["futures", "checkpoint"] }
[build-dependencies]
autocfg = "1" # Needed for conditionally enabling `track-caller`
diff --git a/LICENSE b/LICENSE
index 243fcd6..ffa38bb 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2020 Tokio Contributors
+Copyright (c) 2021 Tokio Contributors
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
diff --git a/METADATA b/METADATA
index 3f041be..0a2c7af 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/tokio/tokio-1.2.0.crate"
+ value: "https://static.crates.io/crates/tokio/tokio-1.5.0.crate"
}
- version: "1.2.0"
+ version: "1.5.0"
license_type: NOTICE
last_upgrade_date {
year: 2021
- month: 2
- day: 9
+ month: 4
+ day: 21
}
}
diff --git a/TEST_MAPPING b/TEST_MAPPING
index 5c61ac1..fd9e265 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -2,55 +2,43 @@
{
"presubmit": [
{
- "name": "tokio_device_test_tests_io_read_line"
+ "name": "doh_unit_test"
},
{
- "name": "tokio_device_test_tests_sync_semaphore"
- },
- {
- "name": "tokio_device_test_tests_signal_drop_signal"
- },
- {
- "name": "tokio_device_test_tests_signal_ctrl_c"
- },
- {
- "name": "tokio_device_test_tests_io_write_buf"
- },
- {
- "name": "tokio_device_test_tests_fs_file"
+ "name": "futures-util_device_test_src_lib"
},
{
- "name": "tokio_device_test_tests_io_write_int"
+ "name": "tokio-test_device_test_src_lib"
},
{
- "name": "tokio_device_test_tests_signal_no_rt"
+ "name": "tokio-test_device_test_tests_block_on"
},
{
- "name": "tokio_device_test_tests_sync_semaphore_owned"
+ "name": "tokio-test_device_test_tests_io"
},
{
- "name": "tokio_device_test_tests_process_issue_42"
+ "name": "tokio-test_device_test_tests_macros"
},
{
- "name": "tokio_device_test_tests_task_local_set"
+ "name": "tokio_device_test_tests_buffered"
},
{
- "name": "tokio_device_test_tests_tcp_stream"
+ "name": "tokio_device_test_tests_fs_file"
},
{
- "name": "tokio_device_test_tests_task_blocking"
+ "name": "tokio_device_test_tests_fs_link"
},
{
- "name": "tokio_device_test_tests_signal_multi_rt"
+ "name": "tokio_device_test_tests_io_async_read"
},
{
- "name": "tokio_device_test_tests_uds_split"
+ "name": "tokio_device_test_tests_io_copy_bidirectional"
},
{
- "name": "tokio_device_test_tests_tcp_split"
+ "name": "tokio_device_test_tests_io_lines"
},
{
- "name": "tokio_device_test_tests_macros_join"
+ "name": "tokio_device_test_tests_io_mem_stream"
},
{
"name": "tokio_device_test_tests_io_read"
@@ -59,217 +47,121 @@
"name": "tokio_device_test_tests_io_read_buf"
},
{
- "name": "tokio_device_test_tests_signal_notify_both"
- },
- {
- "name": "tokio_device_test_tests_sync_mutex_owned"
- },
- {
- "name": "tokio_device_test_tests_net_lookup_host"
- },
- {
- "name": "tokio_device_test_tests_buffered"
- },
- {
- "name": "tokio_device_test_tests_signal_usr1"
- },
- {
- "name": "tokio_device_test_tests_rt_basic"
- },
- {
- "name": "tokio_device_test_tests_tcp_connect"
- },
- {
- "name": "futures-util_device_test_src_lib"
+ "name": "tokio_device_test_tests_io_read_to_end"
},
{
- "name": "tokio_device_test_tests_sync_oneshot"
+ "name": "tokio_device_test_tests_io_take"
},
{
- "name": "tokio_device_test_tests_uds_cred"
+ "name": "tokio_device_test_tests_io_write"
},
{
- "name": "tokio_device_test_tests_tcp_into_std"
+ "name": "tokio_device_test_tests_io_write_all"
},
{
- "name": "tokio-test_device_test_tests_macros"
+ "name": "tokio_device_test_tests_io_write_buf"
},
{
- "name": "tokio_device_test_tests_io_write_all"
+ "name": "tokio_device_test_tests_io_write_int"
},
{
- "name": "tokio_device_test_tests_io_read_exact"
+ "name": "tokio_device_test_tests_macros_join"
},
{
"name": "tokio_device_test_tests_no_rt"
},
{
- "name": "tokio_device_test_tests_rt_threaded"
- },
- {
- "name": "tokio_device_test_tests_fs_dir"
+ "name": "tokio_device_test_tests_process_issue_2174"
},
{
- "name": "tokio_device_test_tests_sync_rwlock"
+ "name": "tokio_device_test_tests_process_issue_42"
},
{
- "name": "tokio_device_test_tests_io_read_to_string"
+ "name": "tokio_device_test_tests_process_smoke"
},
{
- "name": "tokio_device_test_tests_tcp_shutdown"
+ "name": "tokio_device_test_tests_rt_basic"
},
{
- "name": "tokio_device_test_tests_macros_select"
+ "name": "tokio_device_test_tests_rt_threaded"
},
{
- "name": "tokio_device_test_tests_tcp_accept"
+ "name": "tokio_device_test_tests_signal_ctrl_c"
},
{
"name": "tokio_device_test_tests_signal_drop_rt"
},
{
- "name": "tokio_device_test_tests_rt_common"
- },
- {
- "name": "tokio_device_test_tests_sync_broadcast"
- },
- {
- "name": "tokio_device_test_tests_uds_stream"
- },
- {
- "name": "tokio_device_test_tests_io_split"
- },
- {
- "name": "tokio_device_test_tests_io_lines"
- },
- {
- "name": "tokio_device_test_tests_process_issue_2174"
- },
- {
- "name": "tokio_device_test_tests_sync_mpsc"
- },
- {
- "name": "tokio_device_test_tests_udp"
- },
- {
- "name": "tokio_device_test_tests_macros_try_join"
- },
- {
- "name": "tokio_device_test_tests_sync_notify"
- },
- {
- "name": "tokio_device_test_tests_sync_mutex"
+ "name": "tokio_device_test_tests_signal_drop_signal"
},
{
- "name": "tokio_device_test_tests_task_abort"
+ "name": "tokio_device_test_tests_signal_multi_rt"
},
{
- "name": "tokio-test_device_test_tests_io"
+ "name": "tokio_device_test_tests_signal_no_rt"
},
{
- "name": "tokio_device_test_tests_fs_link"
+ "name": "tokio_device_test_tests_signal_notify_both"
},
{
"name": "tokio_device_test_tests_signal_twice"
},
{
- "name": "tokio_device_test_tests_signal_drop_recv"
- },
- {
- "name": "tokio_device_test_tests_io_async_fd"
- },
- {
- "name": "tokio_device_test_tests_io_read_to_end"
- },
- {
- "name": "tokio_device_test_tests_macros_pin"
+ "name": "tokio_device_test_tests_sync_barrier"
},
{
- "name": "tokio_device_test_tests_io_read_until"
+ "name": "tokio_device_test_tests_sync_broadcast"
},
{
- "name": "tokio_device_test_tests_io_take"
+ "name": "tokio_device_test_tests_sync_errors"
},
{
- "name": "tokio_device_test_tests_tcp_echo"
+ "name": "tokio_device_test_tests_sync_mpsc"
},
{
- "name": "tokio_device_test_tests_net_bind_resource"
+ "name": "tokio_device_test_tests_sync_mutex_owned"
},
{
- "name": "tokio_device_test_tests_macros_test"
+ "name": "tokio_device_test_tests_sync_once_cell"
},
{
- "name": "tokio_device_test_tests_fs"
+ "name": "tokio_device_test_tests_sync_rwlock"
},
{
"name": "tokio_device_test_tests_sync_watch"
},
{
- "name": "tokio_device_test_tests_async_send_sync"
- },
- {
- "name": "tokio_device_test_tests_io_copy"
- },
- {
"name": "tokio_device_test_tests_task_local"
},
{
- "name": "tokio_device_test_tests_sync_errors"
- },
- {
- "name": "tokio_device_test_tests_fs_copy"
- },
- {
- "name": "tokio_device_test_tests_process_kill_on_drop"
- },
- {
- "name": "tokio_device_test_tests_tcp_into_split"
- },
- {
- "name": "tokio_device_test_tests_tcp_peek"
- },
- {
- "name": "tokio_device_test_tests_io_driver"
- },
- {
- "name": "tokio_device_test_tests_io_mem_stream"
- },
- {
- "name": "tokio_device_test_tests_process_smoke"
- },
- {
- "name": "tokio_device_test_tests_uds_datagram"
- },
- {
- "name": "tokio-test_device_test_tests_block_on"
+ "name": "tokio_device_test_tests_task_local_set"
},
{
- "name": "tokio_device_test_tests_sync_barrier"
+ "name": "tokio_device_test_tests_tcp_accept"
},
{
- "name": "tokio_device_test_tests_tcp_socket"
+ "name": "tokio_device_test_tests_tcp_echo"
},
{
- "name": "tokio-test_device_test_src_lib"
+ "name": "tokio_device_test_tests_tcp_into_std"
},
{
- "name": "tokio_device_test_tests_test_clock"
+ "name": "tokio_device_test_tests_tcp_shutdown"
},
{
- "name": "tokio_device_test_tests_fs_file_mocked"
+ "name": "tokio_device_test_tests_time_interval"
},
{
- "name": "tokio_device_test_tests_io_write"
+ "name": "tokio_device_test_tests_time_rt"
},
{
- "name": "tokio_device_test_tests_io_driver_drop"
+ "name": "tokio_device_test_tests_time_timeout"
},
{
- "name": "tokio_device_test_tests_io_chain"
+ "name": "tokio_device_test_tests_uds_split"
},
{
- "name": "tokio_device_test_tests_io_async_read"
+ "name": "tokio_device_test_tests_uds_stream"
}
]
}
diff --git a/patches/Android.bp.patch b/patches/Android.bp.patch
deleted file mode 100644
index 1670d12..0000000
--- a/patches/Android.bp.patch
+++ /dev/null
@@ -1,159 +0,0 @@
-diff --git a/Android.bp b/Android.bp
-index 1b9828d..6ae37fc 100644
---- a/Android.bp
-+++ b/Android.bp
-@@ -75,28 +75,13 @@ rust_defaults {
- "libnix",
- "libnum_cpus",
- "libpin_project_lite",
-- "libproptest",
- "librand",
-- "libtempfile",
- "libtokio_stream",
- "libtokio_test",
- ],
- proc_macros: ["libtokio_macros"],
- }
-
--rust_test_host {
-- name: "tokio_host_test_src_lib",
-- defaults: ["tokio_defaults"],
-- test_options: {
-- unit_test: true,
-- },
--}
--
--rust_test {
-- name: "tokio_device_test_src_lib",
-- defaults: ["tokio_defaults"],
--}
--
- rust_defaults {
- name: "tokio_defaults_tokio",
- crate_name: "tokio",
-@@ -132,9 +117,7 @@ rust_defaults {
- "libnix",
- "libnum_cpus",
- "libpin_project_lite",
-- "libproptest",
- "librand",
-- "libtempfile",
- "libtokio",
- "libtokio_stream",
- "libtokio_test",
-@@ -142,21 +125,6 @@ rust_defaults {
- proc_macros: ["libtokio_macros"],
- }
-
--rust_test_host {
-- name: "tokio_host_test_tests__require_full",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/_require_full.rs"],
-- test_options: {
-- unit_test: true,
-- },
--}
--
--rust_test {
-- name: "tokio_device_test_tests__require_full",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/_require_full.rs"],
--}
--
- rust_test_host {
- name: "tokio_host_test_tests_async_send_sync",
- defaults: ["tokio_defaults_tokio"],
-@@ -1357,81 +1325,6 @@ rust_test {
- srcs: ["tests/test_clock.rs"],
- }
-
--rust_test_host {
-- name: "tokio_host_test_tests_time_interval",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_interval.rs"],
-- test_options: {
-- unit_test: true,
-- },
--}
--
--rust_test {
-- name: "tokio_device_test_tests_time_interval",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_interval.rs"],
--}
--
--rust_test_host {
-- name: "tokio_host_test_tests_time_pause",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_pause.rs"],
-- test_options: {
-- unit_test: true,
-- },
--}
--
--rust_test {
-- name: "tokio_device_test_tests_time_pause",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_pause.rs"],
--}
--
--rust_test_host {
-- name: "tokio_host_test_tests_time_rt",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_rt.rs"],
-- test_options: {
-- unit_test: true,
-- },
--}
--
--rust_test {
-- name: "tokio_device_test_tests_time_rt",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_rt.rs"],
--}
--
--rust_test_host {
-- name: "tokio_host_test_tests_time_sleep",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_sleep.rs"],
-- test_options: {
-- unit_test: true,
-- },
--}
--
--rust_test {
-- name: "tokio_device_test_tests_time_sleep",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_sleep.rs"],
--}
--
--rust_test_host {
-- name: "tokio_host_test_tests_time_timeout",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_timeout.rs"],
-- test_options: {
-- unit_test: true,
-- },
--}
--
--rust_test {
-- name: "tokio_device_test_tests_time_timeout",
-- defaults: ["tokio_defaults_tokio"],
-- srcs: ["tests/time_timeout.rs"],
--}
--
- rust_test_host {
- name: "tokio_host_test_tests_udp",
- defaults: ["tokio_defaults_tokio"],
-@@ -1507,11 +1400,3 @@ rust_test {
- srcs: ["tests/uds_stream.rs"],
- }
-
--Errors in cargo.out:
--error: run main Tokio tests with `--features full`
--error: aborting due to previous error
--error: could not compile `tokio`
--error[E0599]: no method named `pause` found for struct `clock::Clock` in the current scope
--error[E0599]: no method named `pause` found for struct `clock::Clock` in the current scope
--error: aborting due to 2 previous errors
--error: build failed
diff --git a/src/coop.rs b/src/coop.rs
index 05b2ae8..16d93fb 100644
--- a/src/coop.rs
+++ b/src/coop.rs
@@ -1,55 +1,33 @@
#![cfg_attr(not(feature = "full"), allow(dead_code))]
-//! Opt-in yield points for improved cooperative scheduling.
+//! Yield points for improved cooperative scheduling.
//!
-//! A single call to [`poll`] on a top-level task may potentially do a lot of
-//! work before it returns `Poll::Pending`. If a task runs for a long period of
-//! time without yielding back to the executor, it can starve other tasks
-//! waiting on that executor to execute them, or drive underlying resources.
-//! Since Rust does not have a runtime, it is difficult to forcibly preempt a
-//! long-running task. Instead, this module provides an opt-in mechanism for
-//! futures to collaborate with the executor to avoid starvation.
+//! Documentation for this can be found in the [`tokio::task`] module.
//!
-//! Consider a future like this one:
-//!
-//! ```
-//! # use tokio_stream::{Stream, StreamExt};
-//! async fn drop_all<I: Stream + Unpin>(mut input: I) {
-//! while let Some(_) = input.next().await {}
-//! }
-//! ```
-//!
-//! It may look harmless, but consider what happens under heavy load if the
-//! input stream is _always_ ready. If we spawn `drop_all`, the task will never
-//! yield, and will starve other tasks and resources on the same executor. With
-//! opt-in yield points, this problem is alleviated:
-//!
-//! ```ignore
-//! # use tokio_stream::{Stream, StreamExt};
-//! async fn drop_all<I: Stream + Unpin>(mut input: I) {
-//! while let Some(_) = input.next().await {
-//! tokio::coop::proceed().await;
-//! }
-//! }
-//! ```
-//!
-//! The `proceed` future will coordinate with the executor to make sure that
-//! every so often control is yielded back to the executor so it can run other
-//! tasks.
-//!
-//! # Placing yield points
-//!
-//! Voluntary yield points should be placed _after_ at least some work has been
-//! done. If they are not, a future sufficiently deep in the task hierarchy may
-//! end up _never_ getting to run because of the number of yield points that
-//! inevitably appear before it is reached. In general, you will want yield
-//! points to only appear in "leaf" futures -- those that do not themselves poll
-//! other futures. By doing this, you avoid double-counting each iteration of
-//! the outer future against the cooperating budget.
-//!
-//! [`poll`]: method@std::future::Future::poll
-
-// NOTE: The doctests in this module are ignored since the whole module is (currently) private.
+//! [`tokio::task`]: crate::task.
+
+// ```ignore
+// # use tokio_stream::{Stream, StreamExt};
+// async fn drop_all<I: Stream + Unpin>(mut input: I) {
+// while let Some(_) = input.next().await {
+// tokio::coop::proceed().await;
+// }
+// }
+// ```
+//
+// The `proceed` future will coordinate with the executor to make sure that
+// every so often control is yielded back to the executor so it can run other
+// tasks.
+//
+// # Placing yield points
+//
+// Voluntary yield points should be placed _after_ at least some work has been
+// done. If they are not, a future sufficiently deep in the task hierarchy may
+// end up _never_ getting to run because of the number of yield points that
+// inevitably appear before it is reached. In general, you will want yield
+// points to only appear in "leaf" futures -- those that do not themselves poll
+// other futures. By doing this, you avoid double-counting each iteration of
+// the outer future against the cooperating budget.
use std::cell::Cell;
@@ -98,6 +76,13 @@ pub(crate) fn budget<R>(f: impl FnOnce() -> R) -> R {
with_budget(Budget::initial(), f)
}
+/// Run the given closure with an unconstrained task budget. When the function returns, the budget
+/// is reset to the value prior to calling the function.
+#[inline(always)]
+pub(crate) fn with_unconstrained<R>(f: impl FnOnce() -> R) -> R {
+ with_budget(Budget::unconstrained(), f)
+}
+
#[inline(always)]
fn with_budget<R>(budget: Budget, f: impl FnOnce() -> R) -> R {
struct ResetGuard<'a> {
diff --git a/src/io/async_fd.rs b/src/io/async_fd.rs
index 13f4f2d..5a68d30 100644
--- a/src/io/async_fd.rs
+++ b/src/io/async_fd.rs
@@ -8,7 +8,8 @@ use std::{task::Context, task::Poll};
/// Associates an IO object backed by a Unix file descriptor with the tokio
/// reactor, allowing for readiness to be polled. The file descriptor must be of
/// a type that can be used with the OS polling facilities (ie, `poll`, `epoll`,
-/// `kqueue`, etc), such as a network socket or pipe.
+/// `kqueue`, etc), such as a network socket or pipe, and the file descriptor
+/// must have the nonblocking mode set to true.
///
/// Creating an AsyncFd registers the file descriptor with the current tokio
/// Reactor, allowing you to directly await the file descriptor being readable
@@ -36,18 +37,19 @@ use std::{task::Context, task::Poll};
///
/// On some platforms, the readiness detecting mechanism relies on
/// edge-triggered notifications. This means that the OS will only notify Tokio
-/// when the file descriptor transitions from not-ready to ready. Tokio
-/// internally tracks when it has received a ready notification, and when
+/// when the file descriptor transitions from not-ready to ready. For this to
+/// work you should first try to read or write and only poll for readiness
+/// if that fails with an error of [`std::io::ErrorKind::WouldBlock`].
+///
+/// Tokio internally tracks when it has received a ready notification, and when
/// readiness checking functions like [`readable`] and [`writable`] are called,
/// if the readiness flag is set, these async functions will complete
-/// immediately.
-///
-/// This however does mean that it is critical to ensure that this ready flag is
-/// cleared when (and only when) the file descriptor ceases to be ready. The
-/// [`AsyncFdReadyGuard`] returned from readiness checking functions serves this
-/// function; after calling a readiness-checking async function, you must use
-/// this [`AsyncFdReadyGuard`] to signal to tokio whether the file descriptor is no
-/// longer in a ready state.
+/// immediately. This however does mean that it is critical to ensure that this
+/// ready flag is cleared when (and only when) the file descriptor ceases to be
+/// ready. The [`AsyncFdReadyGuard`] returned from readiness checking functions
+/// serves this function; after calling a readiness-checking async function,
+/// you must use this [`AsyncFdReadyGuard`] to signal to tokio whether the file
+/// descriptor is no longer in a ready state.
///
/// ## Use with a poll-based API
///
@@ -519,6 +521,8 @@ impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> {
/// create this `AsyncFdReadyGuard`.
///
/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
+ // Alias for old name in 0.x
+ #[cfg_attr(docsrs, doc(alias = "with_io"))]
pub fn try_io<R>(
&mut self,
f: impl FnOnce(&AsyncFd<Inner>) -> io::Result<R>,
diff --git a/src/io/async_read.rs b/src/io/async_read.rs
index d075443..93e5d3e 100644
--- a/src/io/async_read.rs
+++ b/src/io/async_read.rs
@@ -43,9 +43,9 @@ use std::task::{Context, Poll};
pub trait AsyncRead {
/// Attempts to read from the `AsyncRead` into `buf`.
///
- /// On success, returns `Poll::Ready(Ok(()))` and fills `buf` with data
- /// read. If no data was read (`buf.filled().is_empty()`) it implies that
- /// EOF has been reached.
+ /// On success, returns `Poll::Ready(Ok(()))` and places data in the
+ /// unfilled portion of `buf`. If no data was read (`buf.filled().len()` is
+ /// unchanged), it implies that EOF has been reached.
///
/// If no data is available for reading, the method returns `Poll::Pending`
/// and arranges for the current task (via `cx.waker()`) to receive a
diff --git a/src/io/driver/interest.rs b/src/io/driver/interest.rs
index 8c8049d..9eead08 100644
--- a/src/io/driver/interest.rs
+++ b/src/io/driver/interest.rs
@@ -1,4 +1,4 @@
-#![cfg_attr(not(feature = "net"), allow(unreachable_pub))]
+#![cfg_attr(not(feature = "net"), allow(dead_code, unreachable_pub))]
use crate::io::driver::Ready;
diff --git a/src/io/driver/registration.rs b/src/io/driver/registration.rs
index 1451224..8251fe6 100644
--- a/src/io/driver/registration.rs
+++ b/src/io/driver/registration.rs
@@ -1,3 +1,5 @@
+#![cfg_attr(not(feature = "net"), allow(dead_code))]
+
use crate::io::driver::{Direction, Handle, Interest, ReadyEvent, ScheduledIo};
use crate::util::slab;
@@ -233,7 +235,10 @@ cfg_io_readiness! {
crate::future::poll_fn(|cx| {
if self.handle.inner().is_none() {
- return Poll::Ready(Err(io::Error::new(io::ErrorKind::Other, "reactor gone")));
+ return Poll::Ready(Err(io::Error::new(
+ io::ErrorKind::Other,
+ crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR
+ )));
}
Pin::new(&mut fut).poll(cx).map(Ok)
diff --git a/src/io/driver/scheduled_io.rs b/src/io/driver/scheduled_io.rs
index 71864b3..2626b40 100644
--- a/src/io/driver/scheduled_io.rs
+++ b/src/io/driver/scheduled_io.rs
@@ -443,7 +443,7 @@ cfg_io_readiness! {
// Currently ready!
let tick = TICK.unpack(curr) as u8;
*state = State::Done;
- return Poll::Ready(ReadyEvent { ready, tick });
+ return Poll::Ready(ReadyEvent { tick, ready });
}
// Wasn't ready, take the lock (and check again while locked).
@@ -462,7 +462,7 @@ cfg_io_readiness! {
// Currently ready!
let tick = TICK.unpack(curr) as u8;
*state = State::Done;
- return Poll::Ready(ReadyEvent { ready, tick });
+ return Poll::Ready(ReadyEvent { tick, ready });
}
// Not ready even after locked, insert into list...
diff --git a/src/io/mod.rs b/src/io/mod.rs
index 3e7c943..14a4a63 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -246,7 +246,7 @@ cfg_io_util! {
pub(crate) mod seek;
pub(crate) mod util;
pub use util::{
- copy, copy_buf, duplex, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt,
+ copy, copy_bidirectional, copy_buf, duplex, empty, repeat, sink, AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt,
BufReader, BufStream, BufWriter, DuplexStream, Empty, Lines, Repeat, Sink, Split, Take,
};
}
diff --git a/src/io/poll_evented.rs b/src/io/poll_evented.rs
index 27a4cb7..47ae558 100644
--- a/src/io/poll_evented.rs
+++ b/src/io/poll_evented.rs
@@ -121,6 +121,11 @@ impl<E: Source> PollEvented<E> {
}
/// Returns a reference to the registration
+ #[cfg(any(
+ feature = "net",
+ all(unix, feature = "process"),
+ all(unix, feature = "signal"),
+ ))]
pub(crate) fn registration(&self) -> &Registration {
&self.registration
}
diff --git a/src/io/util/async_buf_read_ext.rs b/src/io/util/async_buf_read_ext.rs
index 7977a0e..233ac31 100644
--- a/src/io/util/async_buf_read_ext.rs
+++ b/src/io/util/async_buf_read_ext.rs
@@ -23,6 +23,8 @@ cfg_io_util! {
///
/// If successful, this function will return the total number of bytes read.
///
+ /// If this function returns `Ok(0)`, the stream has reached EOF.
+ ///
/// # Errors
///
/// This function will ignore all instances of [`ErrorKind::Interrupted`] and
diff --git a/src/io/util/async_read_ext.rs b/src/io/util/async_read_ext.rs
index ebcbce6..e715f9d 100644
--- a/src/io/util/async_read_ext.rs
+++ b/src/io/util/async_read_ext.rs
@@ -35,7 +35,7 @@ cfg_io_util! {
/// Reads bytes from a source.
///
- /// Implemented as an extention trait, adding utility methods to all
+ /// Implemented as an extension trait, adding utility methods to all
/// [`AsyncRead`] types. Callers will tend to import this trait instead of
/// [`AsyncRead`].
///
diff --git a/src/io/util/async_seek_ext.rs b/src/io/util/async_seek_ext.rs
index 813913f..297a4a6 100644
--- a/src/io/util/async_seek_ext.rs
+++ b/src/io/util/async_seek_ext.rs
@@ -66,6 +66,17 @@ cfg_io_util! {
{
seek(self, pos)
}
+
+ /// Creates a future which will return the current seek position from the
+ /// start of the stream.
+ ///
+ /// This is equivalent to `self.seek(SeekFrom::Current(0))`.
+ fn stream_position(&mut self) -> Seek<'_, Self>
+ where
+ Self: Unpin,
+ {
+ self.seek(SeekFrom::Current(0))
+ }
}
}
diff --git a/src/io/util/async_write_ext.rs b/src/io/util/async_write_ext.rs
index dc500f2..d011d82 100644
--- a/src/io/util/async_write_ext.rs
+++ b/src/io/util/async_write_ext.rs
@@ -11,7 +11,9 @@ use crate::io::util::write_int::{
WriteU128, WriteU128Le, WriteU16, WriteU16Le, WriteU32, WriteU32Le, WriteU64, WriteU64Le,
WriteU8,
};
+use crate::io::util::write_vectored::{write_vectored, WriteVectored};
use crate::io::AsyncWrite;
+use std::io::IoSlice;
use bytes::Buf;
@@ -35,7 +37,7 @@ cfg_io_util! {
/// Writes bytes to a sink.
///
- /// Implemented as an extention trait, adding utility methods to all
+ /// Implemented as an extension trait, adding utility methods to all
/// [`AsyncWrite`] types. Callers will tend to import this trait instead of
/// [`AsyncWrite`].
///
@@ -116,6 +118,47 @@ cfg_io_util! {
write(self, src)
}
+ /// Like [`write`], except that it writes from a slice of buffers.
+ ///
+ /// Equivalent to:
+ ///
+ /// ```ignore
+ /// async fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize>;
+ /// ```
+ ///
+ /// See [`AsyncWrite::poll_write_vectored`] for more details.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use tokio::io::{self, AsyncWriteExt};
+ /// use tokio::fs::File;
+ /// use std::io::IoSlice;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> io::Result<()> {
+ /// let mut file = File::create("foo.txt").await?;
+ ///
+ /// let bufs: &[_] = &[
+ /// IoSlice::new(b"hello"),
+ /// IoSlice::new(b" "),
+ /// IoSlice::new(b"world"),
+ /// ];
+ ///
+ /// file.write_vectored(&bufs).await?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`write`]: AsyncWriteExt::write
+ fn write_vectored<'a, 'b>(&'a mut self, bufs: &'a [IoSlice<'b>]) -> WriteVectored<'a, 'b, Self>
+ where
+ Self: Unpin,
+ {
+ write_vectored(self, bufs)
+ }
+
/// Writes a buffer into this writer, advancing the buffer's internal
/// cursor.
diff --git a/src/io/util/copy.rs b/src/io/util/copy.rs
index c5981cf..3cd425b 100644
--- a/src/io/util/copy.rs
+++ b/src/io/util/copy.rs
@@ -5,18 +5,85 @@ use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
+#[derive(Debug)]
+pub(super) struct CopyBuffer {
+ read_done: bool,
+ pos: usize,
+ cap: usize,
+ amt: u64,
+ buf: Box<[u8]>,
+}
+
+impl CopyBuffer {
+ pub(super) fn new() -> Self {
+ Self {
+ read_done: false,
+ pos: 0,
+ cap: 0,
+ amt: 0,
+ buf: vec![0; 2048].into_boxed_slice(),
+ }
+ }
+
+ pub(super) fn poll_copy<R, W>(
+ &mut self,
+ cx: &mut Context<'_>,
+ mut reader: Pin<&mut R>,
+ mut writer: Pin<&mut W>,
+ ) -> Poll<io::Result<u64>>
+ where
+ R: AsyncRead + ?Sized,
+ W: AsyncWrite + ?Sized,
+ {
+ loop {
+ // If our buffer is empty, then we need to read some data to
+ // continue.
+ if self.pos == self.cap && !self.read_done {
+ let me = &mut *self;
+ let mut buf = ReadBuf::new(&mut me.buf);
+ ready!(reader.as_mut().poll_read(cx, &mut buf))?;
+ let n = buf.filled().len();
+ if n == 0 {
+ self.read_done = true;
+ } else {
+ self.pos = 0;
+ self.cap = n;
+ }
+ }
+
+ // If our buffer has some data, let's write it out!
+ while self.pos < self.cap {
+ let me = &mut *self;
+ let i = ready!(writer.as_mut().poll_write(cx, &me.buf[me.pos..me.cap]))?;
+ if i == 0 {
+ return Poll::Ready(Err(io::Error::new(
+ io::ErrorKind::WriteZero,
+ "write zero byte into writer",
+ )));
+ } else {
+ self.pos += i;
+ self.amt += i as u64;
+ }
+ }
+
+ // If we've written all the data and we've seen EOF, flush out the
+ // data and finish the transfer.
+ if self.pos == self.cap && self.read_done {
+ ready!(writer.as_mut().poll_flush(cx))?;
+ return Poll::Ready(Ok(self.amt));
+ }
+ }
+ }
+}
+
/// A future that asynchronously copies the entire contents of a reader into a
/// writer.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
struct Copy<'a, R: ?Sized, W: ?Sized> {
reader: &'a mut R,
- read_done: bool,
writer: &'a mut W,
- pos: usize,
- cap: usize,
- amt: u64,
- buf: Box<[u8]>,
+ buf: CopyBuffer,
}
cfg_io_util! {
@@ -35,8 +102,8 @@ cfg_io_util! {
///
/// # Errors
///
- /// The returned future will finish with an error will return an error
- /// immediately if any call to `poll_read` or `poll_write` returns an error.
+ /// The returned future will return an error immediately if any call to
+ /// `poll_read` or `poll_write` returns an error.
///
/// # Examples
///
@@ -60,12 +127,8 @@ cfg_io_util! {
{
Copy {
reader,
- read_done: false,
writer,
- amt: 0,
- pos: 0,
- cap: 0,
- buf: vec![0; 2048].into_boxed_slice(),
+ buf: CopyBuffer::new()
}.await
}
}
@@ -78,44 +141,9 @@ where
type Output = io::Result<u64>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
- loop {
- // If our buffer is empty, then we need to read some data to
- // continue.
- if self.pos == self.cap && !self.read_done {
- let me = &mut *self;
- let mut buf = ReadBuf::new(&mut me.buf);
- ready!(Pin::new(&mut *me.reader).poll_read(cx, &mut buf))?;
- let n = buf.filled().len();
- if n == 0 {
- self.read_done = true;
- } else {
- self.pos = 0;
- self.cap = n;
- }
- }
+ let me = &mut *self;
- // If our buffer has some data, let's write it out!
- while self.pos < self.cap {
- let me = &mut *self;
- let i = ready!(Pin::new(&mut *me.writer).poll_write(cx, &me.buf[me.pos..me.cap]))?;
- if i == 0 {
- return Poll::Ready(Err(io::Error::new(
- io::ErrorKind::WriteZero,
- "write zero byte into writer",
- )));
- } else {
- self.pos += i;
- self.amt += i as u64;
- }
- }
-
- // If we've written all the data and we've seen EOF, flush out the
- // data and finish the transfer.
- if self.pos == self.cap && self.read_done {
- let me = &mut *self;
- ready!(Pin::new(&mut *me.writer).poll_flush(cx))?;
- return Poll::Ready(Ok(self.amt));
- }
- }
+ me.buf
+ .poll_copy(cx, Pin::new(&mut *me.reader), Pin::new(&mut *me.writer))
}
}
diff --git a/src/io/util/copy_bidirectional.rs b/src/io/util/copy_bidirectional.rs
new file mode 100644
index 0000000..cc43f0f
--- /dev/null
+++ b/src/io/util/copy_bidirectional.rs
@@ -0,0 +1,119 @@
+use super::copy::CopyBuffer;
+
+use crate::io::{AsyncRead, AsyncWrite};
+
+use std::future::Future;
+use std::io;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+enum TransferState {
+ Running(CopyBuffer),
+ ShuttingDown(u64),
+ Done(u64),
+}
+
+struct CopyBidirectional<'a, A: ?Sized, B: ?Sized> {
+ a: &'a mut A,
+ b: &'a mut B,
+ a_to_b: TransferState,
+ b_to_a: TransferState,
+}
+
+fn transfer_one_direction<A, B>(
+ cx: &mut Context<'_>,
+ state: &mut TransferState,
+ r: &mut A,
+ w: &mut B,
+) -> Poll<io::Result<u64>>
+where
+ A: AsyncRead + AsyncWrite + Unpin + ?Sized,
+ B: AsyncRead + AsyncWrite + Unpin + ?Sized,
+{
+ let mut r = Pin::new(r);
+ let mut w = Pin::new(w);
+
+ loop {
+ match state {
+ TransferState::Running(buf) => {
+ let count = ready!(buf.poll_copy(cx, r.as_mut(), w.as_mut()))?;
+ *state = TransferState::ShuttingDown(count);
+ }
+ TransferState::ShuttingDown(count) => {
+ ready!(w.as_mut().poll_shutdown(cx))?;
+
+ *state = TransferState::Done(*count);
+ }
+ TransferState::Done(count) => return Poll::Ready(Ok(*count)),
+ }
+ }
+}
+
+impl<'a, A, B> Future for CopyBidirectional<'a, A, B>
+where
+ A: AsyncRead + AsyncWrite + Unpin + ?Sized,
+ B: AsyncRead + AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<(u64, u64)>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ // Unpack self into mut refs to each field to avoid borrow check issues.
+ let CopyBidirectional {
+ a,
+ b,
+ a_to_b,
+ b_to_a,
+ } = &mut *self;
+
+ let a_to_b = transfer_one_direction(cx, a_to_b, &mut *a, &mut *b)?;
+ let b_to_a = transfer_one_direction(cx, b_to_a, &mut *b, &mut *a)?;
+
+ // It is not a problem if ready! returns early because transfer_one_direction for the
+ // other direction will keep returning TransferState::Done(count) in future calls to poll
+ let a_to_b = ready!(a_to_b);
+ let b_to_a = ready!(b_to_a);
+
+ Poll::Ready(Ok((a_to_b, b_to_a)))
+ }
+}
+
+/// Copies data in both directions between `a` and `b`.
+///
+/// This function returns a future that will read from both streams,
+/// writing any data read to the opposing stream.
+/// This happens in both directions concurrently.
+///
+/// If an EOF is observed on one stream, [`shutdown()`] will be invoked on
+/// the other, and reading from that stream will stop. Copying of data in
+/// the other direction will continue.
+///
+/// The future will complete successfully once both directions of communication have been shut down.
+/// A direction is shut down when the reader reports EOF,
+/// at which point [`shutdown()`] is called on the corresponding writer. When finished,
+/// it will return a tuple of the number of bytes copied from a to b
+/// and the number of bytes copied from b to a, in that order.
+///
+/// [`shutdown()`]: crate::io::AsyncWriteExt::shutdown
+///
+/// # Errors
+///
+/// The future will immediately return an error if any IO operation on `a`
+/// or `b` returns an error. Some data read from either stream may be lost (not
+/// written to the other stream) in this case.
+///
+/// # Return value
+///
+/// Returns a tuple of bytes copied `a` to `b` and bytes copied `b` to `a`.
+pub async fn copy_bidirectional<A, B>(a: &mut A, b: &mut B) -> Result<(u64, u64), std::io::Error>
+where
+ A: AsyncRead + AsyncWrite + Unpin + ?Sized,
+ B: AsyncRead + AsyncWrite + Unpin + ?Sized,
+{
+ CopyBidirectional {
+ a,
+ b,
+ a_to_b: TransferState::Running(CopyBuffer::new()),
+ b_to_a: TransferState::Running(CopyBuffer::new()),
+ }
+ .await
+}
diff --git a/src/io/util/mod.rs b/src/io/util/mod.rs
index e06e7e2..ab38664 100644
--- a/src/io/util/mod.rs
+++ b/src/io/util/mod.rs
@@ -27,6 +27,9 @@ cfg_io_util! {
mod copy;
pub use copy::copy;
+ mod copy_bidirectional;
+ pub use copy_bidirectional::copy_bidirectional;
+
mod copy_buf;
pub use copy_buf::copy_buf;
@@ -71,13 +74,14 @@ cfg_io_util! {
pub use take::Take;
mod write;
+ mod write_vectored;
mod write_all;
mod write_buf;
mod write_int;
// used by `BufReader` and `BufWriter`
- // https://github.com/rust-lang/rust/blob/master/src/libstd/sys_common/io.rs#L1
+ // https://github.com/rust-lang/rust/blob/master/library/std/src/sys_common/io.rs#L1
const DEFAULT_BUF_SIZE: usize = 8 * 1024;
}
diff --git a/src/io/util/write_vectored.rs b/src/io/util/write_vectored.rs
new file mode 100644
index 0000000..be40322
--- /dev/null
+++ b/src/io/util/write_vectored.rs
@@ -0,0 +1,47 @@
+use crate::io::AsyncWrite;
+
+use pin_project_lite::pin_project;
+use std::io;
+use std::marker::PhantomPinned;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+use std::{future::Future, io::IoSlice};
+
+pin_project! {
+ /// A future to write a slice of buffers to an `AsyncWrite`.
+ #[derive(Debug)]
+ #[must_use = "futures do nothing unless you `.await` or poll them"]
+ pub struct WriteVectored<'a, 'b, W: ?Sized> {
+ writer: &'a mut W,
+ bufs: &'a [IoSlice<'b>],
+ // Make this future `!Unpin` for compatibility with async trait methods.
+ #[pin]
+ _pin: PhantomPinned,
+ }
+}
+
+pub(crate) fn write_vectored<'a, 'b, W>(
+ writer: &'a mut W,
+ bufs: &'a [IoSlice<'b>],
+) -> WriteVectored<'a, 'b, W>
+where
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ WriteVectored {
+ writer,
+ bufs,
+ _pin: PhantomPinned,
+ }
+}
+
+impl<W> Future for WriteVectored<'_, '_, W>
+where
+ W: AsyncWrite + Unpin + ?Sized,
+{
+ type Output = io::Result<usize>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
+ let me = self.project();
+ Pin::new(&mut *me.writer).poll_write_vectored(cx, me.bufs)
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
index 46f1b84..15aeced 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,4 +1,3 @@
-#![doc(html_root_url = "https://docs.rs/tokio/1.2.0")]
#![allow(
clippy::cognitive_complexity,
clippy::large_enum_variant,
@@ -77,7 +76,7 @@
//!
//! ### Authoring libraries
//!
-//! As a library author your goal should be to provide the lighest weight crate
+//! As a library author your goal should be to provide the lightest weight crate
//! that is based on Tokio. To achieve this you should ensure that you only enable
//! the features you need. This allows users to pick up your crate without having
//! to enable unnecessary features.
@@ -411,7 +410,7 @@ mod util;
/// # Why was `Stream` not included in Tokio 1.0?
///
/// Originally, we had planned to ship Tokio 1.0 with a stable `Stream` type
-/// but unfortunetly the [RFC] had not been merged in time for `Stream` to
+/// but unfortunately the [RFC] had not been merged in time for `Stream` to
/// reach `std` on a stable compiler in time for the 1.0 release of Tokio. For
/// this reason, the team has decided to move all `Stream` based utilities to
/// the [`tokio-stream`] crate. While this is not ideal, once `Stream` has made
diff --git a/src/macros/cfg.rs b/src/macros/cfg.rs
index 9ae098f..3442612 100644
--- a/src/macros/cfg.rs
+++ b/src/macros/cfg.rs
@@ -357,3 +357,21 @@ macro_rules! cfg_coop {
)*
}
}
+
+macro_rules! cfg_not_coop {
+ ($($item:item)*) => {
+ $(
+ #[cfg(not(any(
+ feature = "fs",
+ feature = "io-std",
+ feature = "net",
+ feature = "process",
+ feature = "rt",
+ feature = "signal",
+ feature = "sync",
+ feature = "time",
+ )))]
+ $item
+ )*
+ }
+}
diff --git a/src/macros/pin.rs b/src/macros/pin.rs
index a32187e..7af9ce7 100644
--- a/src/macros/pin.rs
+++ b/src/macros/pin.rs
@@ -44,7 +44,7 @@
/// Stream + Unpin`.
///
/// [`Future`]: trait@std::future::Future
-/// [`Box::pin`]: #
+/// [`Box::pin`]: std::boxed::Box::pin
///
/// # Usage
///
diff --git a/src/macros/select.rs b/src/macros/select.rs
index aa29105..3ba16b6 100644
--- a/src/macros/select.rs
+++ b/src/macros/select.rs
@@ -14,7 +14,7 @@
/// branch, which evaluates if none of the other branches match their patterns:
///
/// ```text
-/// else <expression>
+/// else => <expression>
/// ```
///
/// The macro aggregates all `<async expression>` expressions and runs them
@@ -129,8 +129,24 @@
///
/// ### Fairness
///
-/// `select!` randomly picks a branch to check first. This provides some level
-/// of fairness when calling `select!` in a loop with branches that are always
+/// By default, `select!` randomly picks a branch to check first. This provides
+/// some level of fairness when calling `select!` in a loop with branches that
+/// are always ready.
+///
+/// This behavior can be overridden by adding `biased;` to the beginning of the
+/// macro usage. See the examples for details. This will cause `select` to poll
+/// the futures in the order they appear from top to bottom. There are a few
+/// reasons you may want this:
+///
+/// - The random number generation of `tokio::select!` has a non-zero CPU cost
+/// - Your futures may interact in a way where known polling order is significant
+///
+/// But there is an important caveat to this mode. It becomes your responsibility
+/// to ensure that the polling order of your futures is fair. If for example you
+/// are selecting between a stream and a shutdown future, and the stream has a
+/// huge volume of messages and zero or nearly zero time between them, you should
+/// place the shutdown future earlier in the `select!` list to ensure that it is
+/// always polled, and will not be ignored due to the stream being constantly
/// ready.
///
/// # Panics
@@ -283,6 +299,45 @@
/// assert_eq!(res.1, "second");
/// }
/// ```
+///
+/// Using the `biased;` mode to control polling order.
+///
+/// ```
+/// #[tokio::main]
+/// async fn main() {
+/// let mut count = 0u8;
+///
+/// loop {
+/// tokio::select! {
+/// // If you run this example without `biased;`, the polling order is
+/// // pseudo-random, and the assertions on the value of count will
+/// // (probably) fail.
+/// biased;
+///
+/// _ = async {}, if count < 1 => {
+/// count += 1;
+/// assert_eq!(count, 1);
+/// }
+/// _ = async {}, if count < 2 => {
+/// count += 1;
+/// assert_eq!(count, 2);
+/// }
+/// _ = async {}, if count < 3 => {
+/// count += 1;
+/// assert_eq!(count, 3);
+/// }
+/// _ = async {}, if count < 4 => {
+/// count += 1;
+/// assert_eq!(count, 4);
+/// }
+///
+/// else => {
+/// break;
+/// }
+/// };
+/// }
+/// }
+/// ```
#[macro_export]
#[cfg_attr(docsrs, doc(cfg(feature = "macros")))]
macro_rules! select {
@@ -300,6 +355,10 @@ macro_rules! select {
// All input is normalized, now transform.
(@ {
+ // The index of the future to poll first (in bias mode), or the RNG
+ // expression to use to pick a future to poll first.
+ start=$start:expr;
+
// One `_` for each branch in the `select!` macro. Passing this to
// `count!` converts $skip to an integer.
( $($count:tt)* )
@@ -357,9 +416,11 @@ macro_rules! select {
// disabled.
let mut is_pending = false;
- // Randomly generate a starting point. This makes `select!` a
- // bit more fair and avoids always polling the first future.
- let start = $crate::macros::support::thread_rng_n(BRANCHES);
+ // Choose a starting index to begin polling the futures at. In
+ // practice, this will either be a pseudo-randomly generated
+ // number by default, or the constant 0 if `biased;` is
+ // supplied.
+ let start = $start;
for i in 0..BRANCHES {
let branch;
@@ -444,42 +505,48 @@ macro_rules! select {
// These rules match a single `select!` branch and normalize it for
// processing by the first rule.
- (@ { $($t:tt)* } ) => {
+ (@ { start=$start:expr; $($t:tt)* } ) => {
// No `else` branch
- $crate::select!(@{ $($t)*; panic!("all branches are disabled and there is no else branch") })
+ $crate::select!(@{ start=$start; $($t)*; panic!("all branches are disabled and there is no else branch") })
};
- (@ { $($t:tt)* } else => $else:expr $(,)?) => {
- $crate::select!(@{ $($t)*; $else })
+ (@ { start=$start:expr; $($t:tt)* } else => $else:expr $(,)?) => {
+ $crate::select!(@{ start=$start; $($t)*; $else })
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block, $($r:tt)* ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block, $($r:tt)* ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block, $($r:tt)* ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block, $($r:tt)* ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block $($r:tt)* ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:block $($r:tt)* ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block $($r:tt)* ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:block $($r:tt)* ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, })
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, })
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, })
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, })
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr, $($r:tt)* ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr, if $c:expr => $h:expr, $($r:tt)* ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if $c => $h, } $($r)*)
};
- (@ { ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr, $($r:tt)* ) => {
- $crate::select!(@{ ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
+ (@ { start=$start:expr; ( $($s:tt)* ) $($t:tt)* } $p:pat = $f:expr => $h:expr, $($r:tt)* ) => {
+ $crate::select!(@{ start=$start; ($($s)* _) $($t)* ($($s)*) $p = $f, if true => $h, } $($r)*)
};
// ===== Entry point =====
+ (biased; $p:pat = $($t:tt)* ) => {
+ $crate::select!(@{ start=0; () } $p = $($t)*)
+ };
+
( $p:pat = $($t:tt)* ) => {
- $crate::select!(@{ () } $p = $($t)*)
+ // Randomly generate a starting point. This makes `select!` a bit more
+ // fair and avoids always polling the first future.
+ $crate::select!(@{ start={ $crate::macros::support::thread_rng_n(BRANCHES) }; () } $p = $($t)*)
};
() => {
compile_error!("select! requires at least one branch.")
diff --git a/src/net/tcp/listener.rs b/src/net/tcp/listener.rs
index 1ff0949..5c093bb 100644
--- a/src/net/tcp/listener.rs
+++ b/src/net/tcp/listener.rs
@@ -192,7 +192,6 @@ impl TcpListener {
/// backing event loop. This allows configuration of options like
/// `SO_REUSEPORT`, binding to multiple addresses, etc.
///
- ///
/// # Examples
///
/// ```rust,no_run
@@ -221,6 +220,48 @@ impl TcpListener {
Ok(TcpListener { io })
}
+ /// Turn a [`tokio::net::TcpListener`] into a [`std::net::TcpListener`].
+ ///
+ /// The returned [`std::net::TcpListener`] will have nonblocking mode set as
+ /// `true`. Use [`set_nonblocking`] to change the blocking mode if needed.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// let tokio_listener = tokio::net::TcpListener::bind("127.0.0.1:0").await?;
+ /// let std_listener = tokio_listener.into_std()?;
+ /// std_listener.set_nonblocking(false)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`tokio::net::TcpListener`]: TcpListener
+ /// [`std::net::TcpListener`]: std::net::TcpListener
+ /// [`set_nonblocking`]: fn@std::net::TcpListener::set_nonblocking
+ pub fn into_std(self) -> io::Result<std::net::TcpListener> {
+ #[cfg(unix)]
+ {
+ use std::os::unix::io::{FromRawFd, IntoRawFd};
+ self.io
+ .into_inner()
+ .map(|io| io.into_raw_fd())
+ .map(|raw_fd| unsafe { std::net::TcpListener::from_raw_fd(raw_fd) })
+ }
+
+ #[cfg(windows)]
+ {
+ use std::os::windows::io::{FromRawSocket, IntoRawSocket};
+ self.io
+ .into_inner()
+ .map(|io| io.into_raw_socket())
+ .map(|raw_socket| unsafe { std::net::TcpListener::from_raw_socket(raw_socket) })
+ }
+ }
+
pub(crate) fn new(listener: mio::net::TcpListener) -> io::Result<TcpListener> {
let io = PollEvented::new(listener)?;
Ok(TcpListener { io })
diff --git a/src/net/tcp/socket.rs b/src/net/tcp/socket.rs
index b10898b..4bcbe3f 100644
--- a/src/net/tcp/socket.rs
+++ b/src/net/tcp/socket.rs
@@ -5,7 +5,7 @@ use std::io;
use std::net::SocketAddr;
#[cfg(unix)]
-use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
#[cfg(windows)]
use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket, RawSocket};
@@ -448,7 +448,7 @@ impl TcpSocket {
/// `backlog` defines the maximum number of pending connections are queued
/// by the operating system at any given time. Connection are removed from
/// the queue with [`TcpListener::accept`]. When the queue is full, the
- /// operationg-system will start rejecting connections.
+ /// operating-system will start rejecting connections.
///
/// [`TcpListener::accept`]: TcpListener::accept
///
@@ -511,6 +511,13 @@ impl FromRawFd for TcpSocket {
}
}
+#[cfg(unix)]
+impl IntoRawFd for TcpSocket {
+ fn into_raw_fd(self) -> RawFd {
+ self.inner.into_raw_fd()
+ }
+}
+
#[cfg(windows)]
impl IntoRawSocket for TcpSocket {
fn into_raw_socket(self) -> RawSocket {
diff --git a/src/net/tcp/stream.rs b/src/net/tcp/stream.rs
index 91e357f..e231e5d 100644
--- a/src/net/tcp/stream.rs
+++ b/src/net/tcp/stream.rs
@@ -8,11 +8,6 @@ use std::convert::TryFrom;
use std::fmt;
use std::io;
use std::net::{Shutdown, SocketAddr};
-#[cfg(windows)]
-use std::os::windows::io::{AsRawSocket, FromRawSocket, IntoRawSocket};
-
-#[cfg(unix)]
-use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
@@ -199,7 +194,7 @@ impl TcpStream {
/// Turn a [`tokio::net::TcpStream`] into a [`std::net::TcpStream`].
///
- /// The returned [`std::net::TcpStream`] will have `nonblocking mode` set as `true`.
+ /// The returned [`std::net::TcpStream`] will have nonblocking mode set as `true`.
/// Use [`set_nonblocking`] to change the blocking mode if needed.
///
/// # Examples
@@ -234,6 +229,7 @@ impl TcpStream {
pub fn into_std(self) -> io::Result<std::net::TcpStream> {
#[cfg(unix)]
{
+ use std::os::unix::io::{FromRawFd, IntoRawFd};
self.io
.into_inner()
.map(|io| io.into_raw_fd())
@@ -242,6 +238,7 @@ impl TcpStream {
#[cfg(windows)]
{
+ use std::os::windows::io::{FromRawSocket, IntoRawSocket};
self.io
.into_inner()
.map(|io| io.into_raw_socket())
@@ -932,11 +929,13 @@ impl TcpStream {
fn to_mio(&self) -> mio::net::TcpSocket {
#[cfg(windows)]
{
+ use std::os::windows::io::{AsRawSocket, FromRawSocket};
unsafe { mio::net::TcpSocket::from_raw_socket(self.as_raw_socket()) }
}
#[cfg(unix)]
{
+ use std::os::unix::io::{AsRawFd, FromRawFd};
unsafe { mio::net::TcpSocket::from_raw_fd(self.as_raw_fd()) }
}
}
diff --git a/src/net/udp.rs b/src/net/udp.rs
index 86b4fe9..6e63355 100644
--- a/src/net/udp.rs
+++ b/src/net/udp.rs
@@ -23,10 +23,12 @@ cfg_net! {
/// and [`recv`](`UdpSocket::recv`) to communicate only with that remote address
///
/// This type does not provide a `split` method, because this functionality
- /// can be achieved by wrapping the socket in an [`Arc`]. Note that you do
- /// not need a `Mutex` to share the `UdpSocket` — an `Arc<UdpSocket>` is
- /// enough. This is because all of the methods take `&self` instead of `&mut
- /// self`.
+ /// can be achieved by instead wrapping the socket in an [`Arc`]. Note that
+ /// you do not need a `Mutex` to share the `UdpSocket` — an `Arc<UdpSocket>`
+ /// is enough. This is because all of the methods take `&self` instead of
+ /// `&mut self`. Once you have wrapped it in an `Arc`, you can call
+ /// `.clone()` on the `Arc<UdpSocket>` to get multiple shared handles to the
+ /// same socket. An example of such usage can be found further down.
///
/// [`Arc`]: std::sync::Arc
///
@@ -209,6 +211,48 @@ impl UdpSocket {
UdpSocket::new(io)
}
+ /// Turn a [`tokio::net::UdpSocket`] into a [`std::net::UdpSocket`].
+ ///
+ /// The returned [`std::net::UdpSocket`] will have nonblocking mode set as
+ /// `true`. Use [`set_nonblocking`] to change the blocking mode if needed.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// let tokio_socket = tokio::net::UdpSocket::bind("127.0.0.1:0").await?;
+ /// let std_socket = tokio_socket.into_std()?;
+ /// std_socket.set_nonblocking(false)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`tokio::net::UdpSocket`]: UdpSocket
+ /// [`std::net::UdpSocket`]: std::net::UdpSocket
+ /// [`set_nonblocking`]: fn@std::net::UdpSocket::set_nonblocking
+ pub fn into_std(self) -> io::Result<std::net::UdpSocket> {
+ #[cfg(unix)]
+ {
+ use std::os::unix::io::{FromRawFd, IntoRawFd};
+ self.io
+ .into_inner()
+ .map(|io| io.into_raw_fd())
+ .map(|raw_fd| unsafe { std::net::UdpSocket::from_raw_fd(raw_fd) })
+ }
+
+ #[cfg(windows)]
+ {
+ use std::os::windows::io::{FromRawSocket, IntoRawSocket};
+ self.io
+ .into_inner()
+ .map(|io| io.into_raw_socket())
+ .map(|raw_socket| unsafe { std::net::UdpSocket::from_raw_socket(raw_socket) })
+ }
+ }
+
/// Returns the local address that this socket is bound to.
///
/// # Example
diff --git a/src/net/unix/datagram/socket.rs b/src/net/unix/datagram/socket.rs
index 126a243..6bc5615 100644
--- a/src/net/unix/datagram/socket.rs
+++ b/src/net/unix/datagram/socket.rs
@@ -5,7 +5,7 @@ use std::convert::TryFrom;
use std::fmt;
use std::io;
use std::net::Shutdown;
-use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net;
use std::path::Path;
use std::task::{Context, Poll};
@@ -376,6 +376,36 @@ impl UnixDatagram {
Ok(UnixDatagram { io })
}
+ /// Turn a [`tokio::net::UnixDatagram`] into a [`std::os::unix::net::UnixDatagram`].
+ ///
+ /// The returned [`std::os::unix::net::UnixDatagram`] will have nonblocking
+ /// mode set as `true`. Use [`set_nonblocking`] to change the blocking mode
+ /// if needed.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// let tokio_socket = tokio::net::UnixDatagram::bind("127.0.0.1:0")?;
+ /// let std_socket = tokio_socket.into_std()?;
+ /// std_socket.set_nonblocking(false)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`tokio::net::UnixDatagram`]: UnixDatagram
+ /// [`std::os::unix::net::UnixDatagram`]: std::os::unix::net::UnixDatagram
+ /// [`set_nonblocking`]: fn@std::os::unix::net::UnixDatagram::set_nonblocking
+ pub fn into_std(self) -> io::Result<std::os::unix::net::UnixDatagram> {
+ self.io
+ .into_inner()
+ .map(|io| io.into_raw_fd())
+ .map(|raw_fd| unsafe { std::os::unix::net::UnixDatagram::from_raw_fd(raw_fd) })
+ }
+
fn new(socket: mio::net::UnixDatagram) -> io::Result<UnixDatagram> {
let io = PollEvented::new(socket)?;
Ok(UnixDatagram { io })
diff --git a/src/net/unix/listener.rs b/src/net/unix/listener.rs
index d1c063e..b5b05a6 100644
--- a/src/net/unix/listener.rs
+++ b/src/net/unix/listener.rs
@@ -4,7 +4,7 @@ use crate::net::unix::{SocketAddr, UnixStream};
use std::convert::TryFrom;
use std::fmt;
use std::io;
-use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net;
use std::path::Path;
use std::task::{Context, Poll};
@@ -88,6 +88,35 @@ impl UnixListener {
Ok(UnixListener { io })
}
+ /// Turn a [`tokio::net::UnixListener`] into a [`std::os::unix::net::UnixListener`].
+ ///
+ /// The returned [`std::os::unix::net::UnixListener`] will have nonblocking mode
+ /// set as `true`. Use [`set_nonblocking`] to change the blocking mode if needed.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,no_run
+ /// use std::error::Error;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// let tokio_listener = tokio::net::UnixListener::bind("127.0.0.1:0")?;
+ /// let std_listener = tokio_listener.into_std()?;
+ /// std_listener.set_nonblocking(false)?;
+ /// Ok(())
+ /// }
+ /// ```
+ ///
+ /// [`tokio::net::UnixListener`]: UnixListener
+ /// [`std::os::unix::net::UnixListener`]: std::os::unix::net::UnixListener
+ /// [`set_nonblocking`]: fn@std::os::unix::net::UnixListener::set_nonblocking
+ pub fn into_std(self) -> io::Result<std::os::unix::net::UnixListener> {
+ self.io
+ .into_inner()
+ .map(|io| io.into_raw_fd())
+ .map(|raw_fd| unsafe { net::UnixListener::from_raw_fd(raw_fd) })
+ }
+
/// Returns the local socket address of this listener.
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.io.local_addr().map(SocketAddr)
diff --git a/src/net/unix/stream.rs b/src/net/unix/stream.rs
index a3e3487..d797aae 100644
--- a/src/net/unix/stream.rs
+++ b/src/net/unix/stream.rs
@@ -9,7 +9,7 @@ use std::convert::TryFrom;
use std::fmt;
use std::io::{self, Read, Write};
use std::net::Shutdown;
-use std::os::unix::io::{AsRawFd, RawFd};
+use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use std::os::unix::net;
use std::path::Path;
use std::pin::Pin;
@@ -508,6 +508,51 @@ impl UnixStream {
Ok(UnixStream { io })
}
+ /// Turn a [`tokio::net::UnixStream`] into a [`std::os::unix::net::UnixStream`].
+ ///
+ /// The returned [`std::os::unix::net::UnixStream`] will have nonblocking
+ /// mode set as `true`. Use [`set_nonblocking`] to change the blocking
+ /// mode if needed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::io::Read;
+ /// use tokio::net::UnixListener;
+ /// # use tokio::net::UnixStream;
+ /// # use tokio::io::AsyncWriteExt;
+ ///
+ /// #[tokio::main]
+ /// async fn main() -> Result<(), Box<dyn Error>> {
+ /// let dir = tempfile::tempdir().unwrap();
+ /// let bind_path = dir.path().join("bind_path");
+ ///
+ /// let mut data = [0u8; 12];
+ /// let listener = UnixListener::bind(&bind_path)?;
+ /// # let handle = tokio::spawn(async {
+ /// # let mut stream = UnixStream::connect(bind_path).await.unwrap();
+ /// # stream.write(b"Hello world!").await.unwrap();
+ /// # });
+ /// let (tokio_unix_stream, _) = listener.accept().await?;
+ /// let mut std_unix_stream = tokio_unix_stream.into_std()?;
+ /// # handle.await.expect("The task being joined has panicked");
+ /// std_unix_stream.set_nonblocking(false)?;
+ /// std_unix_stream.read_exact(&mut data)?;
+ /// # assert_eq!(b"Hello world!", &data);
+ /// Ok(())
+ /// }
+ /// ```
+ /// [`tokio::net::UnixStream`]: UnixStream
+ /// [`std::os::unix::net::UnixStream`]: std::os::unix::net::UnixStream
+ /// [`set_nonblocking`]: fn@std::os::unix::net::UnixStream::set_nonblocking
+ pub fn into_std(self) -> io::Result<std::os::unix::net::UnixStream> {
+ self.io
+ .into_inner()
+ .map(|io| io.into_raw_fd())
+ .map(|raw_fd| unsafe { std::os::unix::net::UnixStream::from_raw_fd(raw_fd) })
+ }
+
/// Creates an unnamed pair of connected sockets.
///
/// This function will create a pair of interconnected Unix sockets for
diff --git a/src/park/mod.rs b/src/park/mod.rs
index 5db26ce..edd9371 100644
--- a/src/park/mod.rs
+++ b/src/park/mod.rs
@@ -41,6 +41,7 @@ cfg_rt! {
#[cfg(any(feature = "rt", feature = "sync"))]
pub(crate) mod thread;
+use std::fmt::Debug;
use std::sync::Arc;
use std::time::Duration;
@@ -50,7 +51,7 @@ pub(crate) trait Park {
type Unpark: Unpark;
/// Error returned by `park`
- type Error;
+ type Error: Debug;
/// Gets a new `Unpark` handle associated with this `Park` instance.
fn unpark(&self) -> Self::Unpark;
diff --git a/src/process/mod.rs b/src/process/mod.rs
index 7180d51..00e39b0 100644
--- a/src/process/mod.rs
+++ b/src/process/mod.rs
@@ -479,7 +479,7 @@ impl Command {
/// Basic usage:
///
/// ```no_run
- /// use tokio::process::Command;;
+ /// use tokio::process::Command;
/// use std::process::Stdio;
///
/// let command = Command::new("ls")
@@ -503,7 +503,7 @@ impl Command {
/// Basic usage:
///
/// ```no_run
- /// use tokio::process::Command;;
+ /// use tokio::process::Command;
/// use std::process::{Stdio};
///
/// let command = Command::new("ls")
diff --git a/src/process/unix/driver.rs b/src/process/unix/driver.rs
index 9a16cad..110b484 100644
--- a/src/process/unix/driver.rs
+++ b/src/process/unix/driver.rs
@@ -6,8 +6,8 @@ use crate::park::Park;
use crate::process::unix::orphan::ReapOrphanQueue;
use crate::process::unix::GlobalOrphanQueue;
use crate::signal::unix::driver::Driver as SignalDriver;
-use crate::signal::unix::{signal_with_handle, InternalStream, Signal, SignalKind};
-use crate::sync::mpsc::error::TryRecvError;
+use crate::signal::unix::{signal_with_handle, SignalKind};
+use crate::sync::watch;
use std::io;
use std::time::Duration;
@@ -16,7 +16,7 @@ use std::time::Duration;
#[derive(Debug)]
pub(crate) struct Driver {
park: SignalDriver,
- inner: CoreDriver<Signal, GlobalOrphanQueue>,
+ inner: CoreDriver<watch::Receiver<()>, GlobalOrphanQueue>,
}
#[derive(Debug)]
@@ -25,27 +25,25 @@ struct CoreDriver<S, Q> {
orphan_queue: Q,
}
+trait HasChanged {
+ fn has_changed(&mut self) -> bool;
+}
+
+impl<T> HasChanged for watch::Receiver<T> {
+ fn has_changed(&mut self) -> bool {
+ self.try_has_changed().and_then(Result::ok).is_some()
+ }
+}
+
// ===== impl CoreDriver =====
impl<S, Q> CoreDriver<S, Q>
where
- S: InternalStream,
+ S: HasChanged,
Q: ReapOrphanQueue,
{
- fn got_signal(&mut self) -> bool {
- match self.sigchild.try_recv() {
- Ok(()) => true,
- Err(TryRecvError::Empty) => false,
- Err(TryRecvError::Closed) => panic!("signal was deregistered"),
- }
- }
-
fn process(&mut self) {
- if self.got_signal() {
- // Drain all notifications which may have been buffered
- // so we can try to reap all orphans in one batch
- while self.got_signal() {}
-
+ if self.sigchild.has_changed() {
self.orphan_queue.reap_orphans();
}
}
@@ -97,8 +95,6 @@ impl Park for Driver {
mod test {
use super::*;
use crate::process::unix::orphan::test::MockQueue;
- use crate::sync::mpsc::error::TryRecvError;
- use std::task::{Context, Poll};
struct MockStream {
total_try_recv: usize,
@@ -114,17 +110,10 @@ mod test {
}
}
- impl InternalStream for MockStream {
- fn poll_recv(&mut self, _cx: &mut Context<'_>) -> Poll<Option<()>> {
- unimplemented!();
- }
-
- fn try_recv(&mut self) -> Result<(), TryRecvError> {
+ impl HasChanged for MockStream {
+ fn has_changed(&mut self) -> bool {
self.total_try_recv += 1;
- match self.values.remove(0) {
- Some(()) => Ok(()),
- None => Err(TryRecvError::Empty),
- }
+ self.values.remove(0).is_some()
}
}
@@ -140,17 +129,4 @@ mod test {
assert_eq!(1, driver.sigchild.total_try_recv);
assert_eq!(0, driver.orphan_queue.total_reaps.get());
}
-
- #[test]
- fn coalesce_signals_before_reaping() {
- let mut driver = CoreDriver {
- sigchild: MockStream::new(vec![Some(()), Some(()), None]),
- orphan_queue: MockQueue::<()>::new(),
- };
-
- driver.process();
-
- assert_eq!(3, driver.sigchild.total_try_recv);
- assert_eq!(1, driver.orphan_queue.total_reaps.get());
- }
}
diff --git a/src/process/unix/reap.rs b/src/process/unix/reap.rs
index de483c4..5dc95e5 100644
--- a/src/process/unix/reap.rs
+++ b/src/process/unix/reap.rs
@@ -15,7 +15,7 @@ use std::task::Poll;
#[derive(Debug)]
pub(crate) struct Reaper<W, Q, S>
where
- W: Wait + Unpin,
+ W: Wait,
Q: OrphanQueue<W>,
{
inner: Option<W>,
@@ -25,7 +25,7 @@ where
impl<W, Q, S> Deref for Reaper<W, Q, S>
where
- W: Wait + Unpin,
+ W: Wait,
Q: OrphanQueue<W>,
{
type Target = W;
@@ -37,7 +37,7 @@ where
impl<W, Q, S> Reaper<W, Q, S>
where
- W: Wait + Unpin,
+ W: Wait,
Q: OrphanQueue<W>,
{
pub(crate) fn new(inner: W, orphan_queue: Q, signal: S) -> Self {
@@ -61,7 +61,7 @@ impl<W, Q, S> Future for Reaper<W, Q, S>
where
W: Wait + Unpin,
Q: OrphanQueue<W> + Unpin,
- S: InternalStream,
+ S: InternalStream + Unpin,
{
type Output = io::Result<ExitStatus>;
@@ -106,7 +106,7 @@ where
impl<W, Q, S> Kill for Reaper<W, Q, S>
where
- W: Kill + Wait + Unpin,
+ W: Kill + Wait,
Q: OrphanQueue<W>,
{
fn kill(&mut self) -> io::Result<()> {
@@ -116,7 +116,7 @@ where
impl<W, Q, S> Drop for Reaper<W, Q, S>
where
- W: Wait + Unpin,
+ W: Wait,
Q: OrphanQueue<W>,
{
fn drop(&mut self) {
@@ -134,7 +134,6 @@ mod test {
use super::*;
use crate::process::unix::orphan::test::MockQueue;
- use crate::sync::mpsc::error::TryRecvError;
use futures::future::FutureExt;
use std::os::unix::process::ExitStatusExt;
use std::process::ExitStatus;
@@ -206,10 +205,6 @@ mod test {
None => Poll::Pending,
}
}
-
- fn try_recv(&mut self) -> Result<(), TryRecvError> {
- unimplemented!();
- }
}
#[test]
diff --git a/src/runtime/basic_scheduler.rs b/src/runtime/basic_scheduler.rs
index aeb0150..ffe0bca 100644
--- a/src/runtime/basic_scheduler.rs
+++ b/src/runtime/basic_scheduler.rs
@@ -1,4 +1,5 @@
use crate::future::poll_fn;
+use crate::loom::sync::atomic::AtomicBool;
use crate::loom::sync::Mutex;
use crate::park::{Park, Unpark};
use crate::runtime::task::{self, JoinHandle, Schedule, Task};
@@ -10,6 +11,8 @@ use std::cell::RefCell;
use std::collections::VecDeque;
use std::fmt;
use std::future::Future;
+use std::ptr::NonNull;
+use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};
use std::sync::Arc;
use std::task::Poll::{Pending, Ready};
use std::time::Duration;
@@ -63,13 +66,32 @@ struct Tasks {
queue: VecDeque<task::Notified<Arc<Shared>>>,
}
+/// A remote scheduler entry.
+///
+/// These are filled in by remote threads sending instructions to the scheduler.
+enum Entry {
+ /// A remote thread wants to spawn a task.
+ Schedule(task::Notified<Arc<Shared>>),
+ /// A remote thread wants a task to be released by the scheduler. We only
+ /// have access to its header.
+ Release(NonNull<task::Header>),
+}
+
+// Safety: Used correctly, the task header is "thread safe". Ultimately the task
+// is owned by the current thread executor, for which this instruction is being
+// sent.
+unsafe impl Send for Entry {}
+
/// Scheduler state shared between threads.
struct Shared {
/// Remote run queue
- queue: Mutex<VecDeque<task::Notified<Arc<Shared>>>>,
+ queue: Mutex<VecDeque<Entry>>,
/// Unpark the blocked thread
unpark: Box<dyn Unpark>,
+
+ // indicates whether the blocked on thread was woken
+ woken: AtomicBool,
}
/// Thread-local context.
@@ -85,6 +107,9 @@ struct Context {
const INITIAL_CAPACITY: usize = 64;
/// Max number of tasks to poll per tick.
+#[cfg(loom)]
+const MAX_TASKS_PER_TICK: usize = 4;
+#[cfg(not(loom))]
const MAX_TASKS_PER_TICK: usize = 61;
/// How often to check the remote queue first.
@@ -101,6 +126,7 @@ impl<P: Park> BasicScheduler<P> {
shared: Arc::new(Shared {
queue: Mutex::new(VecDeque::with_capacity(INITIAL_CAPACITY)),
unpark: unpark as Box<dyn Unpark>,
+ woken: AtomicBool::new(false),
}),
};
@@ -177,12 +203,16 @@ impl<P: Park> Inner<P> {
let _enter = crate::runtime::enter(false);
let waker = scheduler.spawner.waker_ref();
let mut cx = std::task::Context::from_waker(&waker);
+ let mut polled = false;
pin!(future);
'outer: loop {
- if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) {
- return v;
+ if scheduler.spawner.was_woken() || !polled {
+ polled = true;
+ if let Ready(v) = crate::coop::budget(|| future.as_mut().poll(&mut cx)) {
+ return v;
+ }
}
for _ in 0..MAX_TASKS_PER_TICK {
@@ -190,29 +220,57 @@ impl<P: Park> Inner<P> {
let tick = scheduler.tick;
scheduler.tick = scheduler.tick.wrapping_add(1);
- let next = if tick % REMOTE_FIRST_INTERVAL == 0 {
- scheduler
- .spawner
- .pop()
- .or_else(|| context.tasks.borrow_mut().queue.pop_front())
+ let entry = if tick % REMOTE_FIRST_INTERVAL == 0 {
+ scheduler.spawner.pop().or_else(|| {
+ context
+ .tasks
+ .borrow_mut()
+ .queue
+ .pop_front()
+ .map(Entry::Schedule)
+ })
} else {
context
.tasks
.borrow_mut()
.queue
.pop_front()
+ .map(Entry::Schedule)
.or_else(|| scheduler.spawner.pop())
};
- match next {
- Some(task) => crate::coop::budget(|| task.run()),
+ let entry = match entry {
+ Some(entry) => entry,
None => {
// Park until the thread is signaled
- scheduler.park.park().ok().expect("failed to park");
+ scheduler.park.park().expect("failed to park");
// Try polling the `block_on` future next
continue 'outer;
}
+ };
+
+ match entry {
+ Entry::Schedule(task) => crate::coop::budget(|| task.run()),
+ Entry::Release(ptr) => {
+ // Safety: the task header is only legally provided
+ // internally in the header, so we know that it is a
+ // valid (or in particular *allocated*) header that
+ // is part of the linked list.
+ unsafe {
+ let removed = context.tasks.borrow_mut().owned.remove(ptr);
+
+ // TODO: This seems like it should hold, because
+ // there doesn't seem to be an avenue for anyone
+ // else to fiddle with the owned tasks
+ // collection *after* a remote thread has marked
+ // it as released, and at that point, the only
+ // location at which it can be removed is here
+ // or in the Drop implementation of the
+ // scheduler.
+ debug_assert!(removed.is_some());
+ }
+ }
}
}
@@ -221,7 +279,6 @@ impl<P: Park> Inner<P> {
scheduler
.park
.park_timeout(Duration::from_millis(0))
- .ok()
.expect("failed to park");
}
})
@@ -295,8 +352,16 @@ impl<P: Park> Drop for BasicScheduler<P> {
}
// Drain remote queue
- for task in scheduler.spawner.shared.queue.lock().drain(..) {
- task.shutdown();
+ for entry in scheduler.spawner.shared.queue.lock().drain(..) {
+ match entry {
+ Entry::Schedule(task) => {
+ task.shutdown();
+ }
+ Entry::Release(..) => {
+ // Do nothing, each entry in the linked list was *just*
+ // dropped by the scheduler above.
+ }
+ }
}
assert!(context.tasks.borrow().owned.is_empty());
@@ -324,13 +389,19 @@ impl Spawner {
handle
}
- fn pop(&self) -> Option<task::Notified<Arc<Shared>>> {
+ fn pop(&self) -> Option<Entry> {
self.shared.queue.lock().pop_front()
}
fn waker_ref(&self) -> WakerRef<'_> {
+ // clear the woken bit
+ self.shared.woken.swap(false, AcqRel);
waker_ref(&self.shared)
}
+
+ fn was_woken(&self) -> bool {
+ self.shared.woken.load(Acquire)
+ }
}
impl fmt::Debug for Spawner {
@@ -351,15 +422,19 @@ impl Schedule for Arc<Shared> {
}
fn release(&self, task: &Task<Self>) -> Option<Task<Self>> {
- use std::ptr::NonNull;
-
CURRENT.with(|maybe_cx| {
- let cx = maybe_cx.expect("scheduler context missing");
+ let ptr = NonNull::from(task.header());
- // safety: the task is inserted in the list in `bind`.
- unsafe {
- let ptr = NonNull::from(task.header());
- cx.tasks.borrow_mut().owned.remove(ptr)
+ if let Some(cx) = maybe_cx {
+ // safety: the task is inserted in the list in `bind`.
+ unsafe { cx.tasks.borrow_mut().owned.remove(ptr) }
+ } else {
+ self.queue.lock().push_back(Entry::Release(ptr));
+ self.unpark.unpark();
+ // Returning `None` here prevents the task plumbing from being
+ // freed. It is then up to the scheduler through the queue we
+ // just added to, or its Drop impl to free the task.
+ None
}
})
}
@@ -370,7 +445,7 @@ impl Schedule for Arc<Shared> {
cx.tasks.borrow_mut().queue.push_back(task);
}
_ => {
- self.queue.lock().push_back(task);
+ self.queue.lock().push_back(Entry::Schedule(task));
self.unpark.unpark();
}
});
@@ -384,6 +459,7 @@ impl Wake for Shared {
/// Wake by reference
fn wake_by_ref(arc_self: &Arc<Self>) {
+ arc_self.woken.store(true, Release);
arc_self.unpark.unpark();
}
}
diff --git a/src/runtime/builder.rs b/src/runtime/builder.rs
index e845192..0249266 100644
--- a/src/runtime/builder.rs
+++ b/src/runtime/builder.rs
@@ -86,6 +86,11 @@ impl Builder {
/// Returns a new builder with the current thread scheduler selected.
///
/// Configuration methods can be chained on the return value.
+ ///
+ /// To spawn non-`Send` tasks on the resulting runtime, combine it with a
+ /// [`LocalSet`].
+ ///
+ /// [`LocalSet`]: crate::task::LocalSet
pub fn new_current_thread() -> Builder {
Builder::new(Kind::CurrentThread)
}
@@ -162,8 +167,8 @@ impl Builder {
/// Sets the number of worker threads the `Runtime` will use.
///
- /// This should be a number between 0 and 32,768 though it is advised to
- /// keep this value on the smaller side.
+ /// This can be any number above 0 though it is advised to keep this value
+ /// on the smaller side.
///
/// # Default
///
@@ -215,19 +220,28 @@ impl Builder {
self
}
- /// Specifies limit for threads spawned by the Runtime used for blocking operations.
+ /// Specifies the limit for additional threads spawned by the Runtime.
///
- ///
- /// Similarly to the `worker_threads`, this number should be between 1 and 32,768.
+ /// These threads are used for blocking operations like tasks spawned
+ /// through [`spawn_blocking`]. Unlike the [`worker_threads`], they are not
+ /// always active and will exit if left idle for too long. You can change
+ /// this timeout duration with [`thread_keep_alive`].
///
/// The default value is 512.
///
- /// Otherwise as `worker_threads` are always active, it limits additional threads (e.g. for
- /// blocking annotations).
- ///
/// # Panic
///
/// This will panic if `val` is not larger than `0`.
+ ///
+ /// # Upgrading from 0.x
+ ///
+ /// In old versions `max_threads` limited both blocking and worker threads, but the
+ /// current `max_blocking_threads` does not include async worker threads in the count.
+ ///
+ /// [`spawn_blocking`]: fn@crate::task::spawn_blocking
+ /// [`worker_threads`]: Self::worker_threads
+ /// [`thread_keep_alive`]: Self::thread_keep_alive
+ #[cfg_attr(docsrs, doc(alias = "max_threads"))]
pub fn max_blocking_threads(&mut self, val: usize) -> &mut Self {
assert!(val > 0, "Max blocking threads cannot be set to 0");
self.max_blocking_threads = val;
diff --git a/src/runtime/context.rs b/src/runtime/context.rs
index 6e4a016..a727ed4 100644
--- a/src/runtime/context.rs
+++ b/src/runtime/context.rs
@@ -40,20 +40,14 @@ cfg_time! {
cfg_test_util! {
pub(crate) fn clock() -> Option<crate::runtime::driver::Clock> {
- CONTEXT.with(|ctx| match *ctx.borrow() {
- Some(ref ctx) => Some(ctx.clock.clone()),
- None => None,
- })
+ CONTEXT.with(|ctx| (*ctx.borrow()).as_ref().map(|ctx| ctx.clock.clone()))
}
}
}
cfg_rt! {
pub(crate) fn spawn_handle() -> Option<crate::runtime::Spawner> {
- CONTEXT.with(|ctx| match *ctx.borrow() {
- Some(ref ctx) => Some(ctx.spawner.clone()),
- None => None,
- })
+ CONTEXT.with(|ctx| (*ctx.borrow()).as_ref().map(|ctx| ctx.spawner.clone()))
}
}
diff --git a/src/runtime/handle.rs b/src/runtime/handle.rs
index 76b28f2..4f1b4c5 100644
--- a/src/runtime/handle.rs
+++ b/src/runtime/handle.rs
@@ -39,6 +39,7 @@ pub struct Handle {
///
/// [`Runtime::enter`]: fn@crate::runtime::Runtime::enter
#[derive(Debug)]
+#[must_use = "Creating and dropping a guard does nothing"]
pub struct EnterGuard<'a> {
handle: &'a Handle,
guard: context::EnterGuard,
@@ -201,6 +202,93 @@ impl Handle {
let _ = self.blocking_spawner.spawn(task, &self);
handle
}
+
+ /// Run a future to completion on this `Handle`'s associated `Runtime`.
+ ///
+ /// This runs the given future on the current thread, blocking until it is
+ /// complete, and yielding its resolved result. Any tasks or timers which
+ /// the future spawns internally will be executed on the runtime.
+ ///
+ /// When this is used on a `current_thread` runtime, only the
+ /// [`Runtime::block_on`] method can drive the IO and timer drivers, but the
+ /// `Handle::block_on` method cannot drive them. This means that, when using
+ /// this method on a current_thread runtime, anything that relies on IO or
+ /// timers will not work unless there is another thread currently calling
+ /// [`Runtime::block_on`] on the same runtime.
+ ///
+ /// # If the runtime has been shut down
+ ///
+ /// If the `Handle`'s associated `Runtime` has been shut down (through
+ /// [`Runtime::shutdown_background`], [`Runtime::shutdown_timeout`], or by
+ /// dropping it) and `Handle::block_on` is used it might return an error or
+ /// panic. Specifically IO resources will return an error and timers will
+ /// panic. Runtime independent futures will run as normal.
+ ///
+ /// # Panics
+ ///
+ /// This function panics if the provided future panics, if called within an
+ /// asynchronous execution context, or if a timer future is executed on a
+ /// runtime that has been shut down.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::runtime::Runtime;
+ ///
+ /// // Create the runtime
+ /// let rt = Runtime::new().unwrap();
+ ///
+ /// // Get a handle from this runtime
+ /// let handle = rt.handle();
+ ///
+ /// // Execute the future, blocking the current thread until completion
+ /// handle.block_on(async {
+ /// println!("hello");
+ /// });
+ /// ```
+ ///
+ /// Or using `Handle::current`:
+ ///
+ /// ```
+ /// use tokio::runtime::Handle;
+ ///
+ /// #[tokio::main]
+ /// async fn main () {
+ /// let handle = Handle::current();
+ /// std::thread::spawn(move || {
+ /// // Using Handle::block_on to run async code in the new thread.
+ /// handle.block_on(async {
+ /// println!("hello");
+ /// });
+ /// });
+ /// }
+ /// ```
+ ///
+ /// [`JoinError`]: struct@crate::task::JoinError
+ /// [`JoinHandle`]: struct@crate::task::JoinHandle
+ /// [`Runtime::block_on`]: fn@crate::runtime::Runtime::block_on
+ /// [`Runtime::shutdown_background`]: fn@crate::runtime::Runtime::shutdown_background
+ /// [`Runtime::shutdown_timeout`]: fn@crate::runtime::Runtime::shutdown_timeout
+ /// [`spawn_blocking`]: crate::task::spawn_blocking
+ /// [`tokio::fs`]: crate::fs
+ /// [`tokio::net`]: crate::net
+ /// [`tokio::time`]: crate::time
+ pub fn block_on<F: Future>(&self, future: F) -> F::Output {
+ // Enter the **runtime** context. This configures spawning, the current I/O driver, ...
+ let _rt_enter = self.enter();
+
+ // Enter a **blocking** context. This prevents blocking from a runtime.
+ let mut blocking_enter = crate::runtime::enter(true);
+
+ // Block on the future
+ blocking_enter
+ .block_on(future)
+ .expect("failed to park thread")
+ }
+
+ pub(crate) fn shutdown(mut self) {
+ self.spawner.shutdown();
+ }
}
/// Error returned by `try_current` when no Runtime has been started
diff --git a/src/runtime/mod.rs b/src/runtime/mod.rs
index b138a66..52532ec 100644
--- a/src/runtime/mod.rs
+++ b/src/runtime/mod.rs
@@ -250,7 +250,7 @@ cfg_rt! {
///
/// The Tokio runtime implements `Sync` and `Send` to allow you to wrap it
/// in a `Arc`. Most fn take `&self` to allow you to call them concurrently
- /// accross multiple threads.
+ /// across multiple threads.
///
/// Calls to `shutdown` and `shutdown_timeout` require exclusive ownership of
/// the runtime type and this can be achieved via `Arc::try_unwrap` when only
@@ -405,7 +405,7 @@ cfg_rt! {
/// Run a future to completion on the Tokio runtime. This is the
/// runtime's entry point.
///
- /// This runs the given future on the runtime, blocking until it is
+ /// This runs the given future on the current thread, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers
/// which the future spawns internally will be executed on the runtime.
///
@@ -526,7 +526,7 @@ cfg_rt! {
/// ```
pub fn shutdown_timeout(mut self, duration: Duration) {
// Wakeup and shutdown all the worker threads
- self.handle.spawner.shutdown();
+ self.handle.shutdown();
self.blocking_pool.shutdown(Some(duration));
}
diff --git a/src/runtime/queue.rs b/src/runtime/queue.rs
index 1c7bb23..6ea23c9 100644
--- a/src/runtime/queue.rs
+++ b/src/runtime/queue.rs
@@ -572,7 +572,7 @@ impl<T: 'static> Inject<T> {
let mut p = self.pointers.lock();
- // It is possible to hit null here if another thread poped the last
+ // It is possible to hit null here if another thread popped the last
// task between us checking `len` and acquiring the lock.
let task = p.head?;
diff --git a/src/runtime/tests/loom_basic_scheduler.rs b/src/runtime/tests/loom_basic_scheduler.rs
new file mode 100644
index 0000000..e6221d3
--- /dev/null
+++ b/src/runtime/tests/loom_basic_scheduler.rs
@@ -0,0 +1,82 @@
+use crate::loom::sync::atomic::AtomicUsize;
+use crate::loom::sync::Arc;
+use crate::loom::thread;
+use crate::runtime::{Builder, Runtime};
+use crate::sync::oneshot::{self, Receiver};
+use crate::task;
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::atomic::Ordering::{Acquire, Release};
+use std::task::{Context, Poll};
+
+fn assert_at_most_num_polls(rt: Arc<Runtime>, at_most_polls: usize) {
+ let (tx, rx) = oneshot::channel();
+ let num_polls = Arc::new(AtomicUsize::new(0));
+ rt.spawn(async move {
+ for _ in 0..12 {
+ task::yield_now().await;
+ }
+ tx.send(()).unwrap();
+ });
+
+ rt.block_on(async {
+ BlockedFuture {
+ rx,
+ num_polls: num_polls.clone(),
+ }
+ .await;
+ });
+
+ let polls = num_polls.load(Acquire);
+ assert!(polls <= at_most_polls);
+}
+
+#[test]
+fn block_on_num_polls() {
+ loom::model(|| {
+ // we expect at most 3 number of polls because there are
+ // three points at which we poll the future. At any of these
+ // points it can be ready:
+ //
+ // - when we fail to steal the parker and we block on a
+ // notification that it is available.
+ //
+ // - when we steal the parker and we schedule the future
+ //
+ // - when the future is woken up and we have ran the max
+ // number of tasks for the current tick or there are no
+ // more tasks to run.
+ //
+ let at_most = 3;
+
+ let rt1 = Arc::new(Builder::new_current_thread().build().unwrap());
+ let rt2 = rt1.clone();
+ let rt3 = rt1.clone();
+
+ let th1 = thread::spawn(move || assert_at_most_num_polls(rt1, at_most));
+ let th2 = thread::spawn(move || assert_at_most_num_polls(rt2, at_most));
+ let th3 = thread::spawn(move || assert_at_most_num_polls(rt3, at_most));
+
+ th1.join().unwrap();
+ th2.join().unwrap();
+ th3.join().unwrap();
+ });
+}
+
+struct BlockedFuture {
+ rx: Receiver<()>,
+ num_polls: Arc<AtomicUsize>,
+}
+
+impl Future for BlockedFuture {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.num_polls.fetch_add(1, Release);
+
+ match Pin::new(&mut self.rx).poll(cx) {
+ Poll::Pending => Poll::Pending,
+ _ => Poll::Ready(()),
+ }
+ }
+}
diff --git a/src/runtime/tests/mod.rs b/src/runtime/tests/mod.rs
index 123a7e3..ebb48de 100644
--- a/src/runtime/tests/mod.rs
+++ b/src/runtime/tests/mod.rs
@@ -1,4 +1,5 @@
cfg_loom! {
+ mod loom_basic_scheduler;
mod loom_blocking;
mod loom_oneshot;
mod loom_pool;
diff --git a/src/runtime/thread_pool/worker.rs b/src/runtime/thread_pool/worker.rs
index dbd1aff..86d3f91 100644
--- a/src/runtime/thread_pool/worker.rs
+++ b/src/runtime/thread_pool/worker.rs
@@ -827,6 +827,6 @@ impl Shared {
}
fn ptr_eq(&self, other: &Shared) -> bool {
- self as *const _ == other as *const _
+ std::ptr::eq(self, other)
}
}
diff --git a/src/signal/mod.rs b/src/signal/mod.rs
index d347e6e..fe572f0 100644
--- a/src/signal/mod.rs
+++ b/src/signal/mod.rs
@@ -42,6 +42,8 @@
//! }
//! # }
//! ```
+use crate::sync::watch::Receiver;
+use std::task::{Context, Poll};
mod ctrl_c;
pub use ctrl_c::ctrl_c;
@@ -58,3 +60,41 @@ mod os {
pub mod unix;
pub mod windows;
+
+mod reusable_box;
+use self::reusable_box::ReusableBoxFuture;
+
+#[derive(Debug)]
+struct RxFuture {
+ inner: ReusableBoxFuture<Receiver<()>>,
+}
+
+async fn make_future(mut rx: Receiver<()>) -> Receiver<()> {
+ match rx.changed().await {
+ Ok(()) => rx,
+ Err(_) => panic!("signal sender went away"),
+ }
+}
+
+impl RxFuture {
+ fn new(rx: Receiver<()>) -> Self {
+ Self {
+ inner: ReusableBoxFuture::new(make_future(rx)),
+ }
+ }
+
+ async fn recv(&mut self) -> Option<()> {
+ use crate::future::poll_fn;
+ poll_fn(|cx| self.poll_recv(cx)).await
+ }
+
+ fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
+ match self.inner.poll(cx) {
+ Poll::Pending => Poll::Pending,
+ Poll::Ready(rx) => {
+ self.inner.set(make_future(rx));
+ Poll::Ready(Some(()))
+ }
+ }
+ }
+}
diff --git a/src/signal/registry.rs b/src/signal/registry.rs
index 55ee8c5..8b89108 100644
--- a/src/signal/registry.rs
+++ b/src/signal/registry.rs
@@ -2,22 +2,32 @@
use crate::signal::os::{OsExtraData, OsStorage};
-use crate::sync::mpsc::Sender;
+use crate::sync::watch;
use once_cell::sync::Lazy;
use std::ops;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
-use std::sync::Mutex;
pub(crate) type EventId = usize;
/// State for a specific event, whether a notification is pending delivery,
/// and what listeners are registered.
-#[derive(Default, Debug)]
+#[derive(Debug)]
pub(crate) struct EventInfo {
pending: AtomicBool,
- recipients: Mutex<Vec<Sender<()>>>,
+ tx: watch::Sender<()>,
+}
+
+impl Default for EventInfo {
+ fn default() -> Self {
+ let (tx, _rx) = watch::channel(());
+
+ Self {
+ pending: AtomicBool::new(false),
+ tx,
+ }
+ }
}
/// An interface for retrieving the `EventInfo` for a particular eventId.
@@ -67,14 +77,12 @@ impl<S> Registry<S> {
impl<S: Storage> Registry<S> {
/// Registers a new listener for `event_id`.
- fn register_listener(&self, event_id: EventId, listener: Sender<()>) {
+ fn register_listener(&self, event_id: EventId) -> watch::Receiver<()> {
self.storage
.event_info(event_id)
.unwrap_or_else(|| panic!("invalid event_id: {}", event_id))
- .recipients
- .lock()
- .unwrap()
- .push(listener);
+ .tx
+ .subscribe()
}
/// Marks `event_id` as having been delivered, without broadcasting it to
@@ -89,8 +97,6 @@ impl<S: Storage> Registry<S> {
///
/// Returns `true` if an event was delivered to at least one listener.
fn broadcast(&self) -> bool {
- use crate::sync::mpsc::error::TrySendError;
-
let mut did_notify = false;
self.storage.for_each(|event_info| {
// Any signal of this kind arrived since we checked last?
@@ -98,23 +104,9 @@ impl<S: Storage> Registry<S> {
return;
}
- let mut recipients = event_info.recipients.lock().unwrap();
-
- // Notify all waiters on this signal that the signal has been
- // received. If we can't push a message into the queue then we don't
- // worry about it as everything is coalesced anyway. If the channel
- // has gone away then we can remove that slot.
- for i in (0..recipients.len()).rev() {
- match recipients[i].try_send(()) {
- Ok(()) => did_notify = true,
- Err(TrySendError::Closed(..)) => {
- recipients.swap_remove(i);
- }
-
- // Channel is full, ignore the error since the
- // receiver has already been woken up
- Err(_) => {}
- }
+ // Ignore errors if there are no listeners
+ if event_info.tx.send(()).is_ok() {
+ did_notify = true;
}
});
@@ -137,8 +129,8 @@ impl ops::Deref for Globals {
impl Globals {
/// Registers a new listener for `event_id`.
- pub(crate) fn register_listener(&self, event_id: EventId, listener: Sender<()>) {
- self.registry.register_listener(event_id, listener);
+ pub(crate) fn register_listener(&self, event_id: EventId) -> watch::Receiver<()> {
+ self.registry.register_listener(event_id)
}
/// Marks `event_id` as having been delivered, without broadcasting it to
@@ -179,7 +171,7 @@ where
mod tests {
use super::*;
use crate::runtime::{self, Runtime};
- use crate::sync::{mpsc, oneshot};
+ use crate::sync::{oneshot, watch};
use futures::future;
@@ -193,13 +185,9 @@ mod tests {
EventInfo::default(),
]);
- let (first_tx, first_rx) = mpsc::channel(3);
- let (second_tx, second_rx) = mpsc::channel(3);
- let (third_tx, third_rx) = mpsc::channel(3);
-
- registry.register_listener(0, first_tx);
- registry.register_listener(1, second_tx);
- registry.register_listener(2, third_tx);
+ let first = registry.register_listener(0);
+ let second = registry.register_listener(1);
+ let third = registry.register_listener(2);
let (fire, wait) = oneshot::channel();
@@ -213,6 +201,9 @@ mod tests {
registry.record_event(1);
registry.broadcast();
+ // Yield so the previous broadcast can get received
+ crate::time::sleep(std::time::Duration::from_millis(10)).await;
+
// Send subsequent signal
registry.record_event(0);
registry.broadcast();
@@ -221,7 +212,7 @@ mod tests {
});
let _ = fire.send(());
- let all = future::join3(collect(first_rx), collect(second_rx), collect(third_rx));
+ let all = future::join3(collect(first), collect(second), collect(third));
let (first_results, second_results, third_results) = all.await;
assert_eq!(2, first_results.len());
@@ -235,8 +226,7 @@ mod tests {
fn register_panics_on_invalid_input() {
let registry = Registry::new(vec![EventInfo::default()]);
- let (tx, _) = mpsc::channel(1);
- registry.register_listener(1, tx);
+ registry.register_listener(1);
}
#[test]
@@ -246,73 +236,36 @@ mod tests {
}
#[test]
- fn broadcast_cleans_up_disconnected_listeners() {
- let rt = Runtime::new().unwrap();
-
- rt.block_on(async {
- let registry = Registry::new(vec![EventInfo::default()]);
-
- let (first_tx, first_rx) = mpsc::channel(1);
- let (second_tx, second_rx) = mpsc::channel(1);
- let (third_tx, third_rx) = mpsc::channel(1);
-
- registry.register_listener(0, first_tx);
- registry.register_listener(0, second_tx);
- registry.register_listener(0, third_tx);
-
- drop(first_rx);
- drop(second_rx);
-
- let (fire, wait) = oneshot::channel();
-
- crate::spawn(async {
- wait.await.expect("wait failed");
-
- registry.record_event(0);
- registry.broadcast();
-
- assert_eq!(1, registry.storage[0].recipients.lock().unwrap().len());
- drop(registry);
- });
-
- let _ = fire.send(());
- let results = collect(third_rx).await;
-
- assert_eq!(1, results.len());
- });
- }
-
- #[test]
fn broadcast_returns_if_at_least_one_event_fired() {
- let registry = Registry::new(vec![EventInfo::default()]);
+ let registry = Registry::new(vec![EventInfo::default(), EventInfo::default()]);
registry.record_event(0);
assert_eq!(false, registry.broadcast());
- let (first_tx, first_rx) = mpsc::channel(1);
- let (second_tx, second_rx) = mpsc::channel(1);
-
- registry.register_listener(0, first_tx);
- registry.register_listener(0, second_tx);
+ let first = registry.register_listener(0);
+ let second = registry.register_listener(1);
registry.record_event(0);
assert_eq!(true, registry.broadcast());
- drop(first_rx);
+ drop(first);
registry.record_event(0);
assert_eq!(false, registry.broadcast());
- drop(second_rx);
+ drop(second);
}
fn rt() -> Runtime {
- runtime::Builder::new_current_thread().build().unwrap()
+ runtime::Builder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap()
}
- async fn collect(mut rx: crate::sync::mpsc::Receiver<()>) -> Vec<()> {
+ async fn collect(mut rx: watch::Receiver<()>) -> Vec<()> {
let mut ret = vec![];
- while let Some(v) = rx.recv().await {
+ while let Ok(v) = rx.changed().await {
ret.push(v);
}
diff --git a/src/signal/reusable_box.rs b/src/signal/reusable_box.rs
new file mode 100644
index 0000000..426ecb0
--- /dev/null
+++ b/src/signal/reusable_box.rs
@@ -0,0 +1,227 @@
+use std::alloc::Layout;
+use std::future::Future;
+use std::panic::AssertUnwindSafe;
+use std::pin::Pin;
+use std::ptr::{self, NonNull};
+use std::task::{Context, Poll};
+use std::{fmt, panic};
+
+/// A reusable `Pin<Box<dyn Future<Output = T> + Send>>`.
+///
+/// This type lets you replace the future stored in the box without
+/// reallocating when the size and alignment permit this.
+pub(crate) struct ReusableBoxFuture<T> {
+ boxed: NonNull<dyn Future<Output = T> + Send>,
+}
+
+impl<T> ReusableBoxFuture<T> {
+ /// Create a new `ReusableBoxFuture<T>` containing the provided future.
+ pub(crate) fn new<F>(future: F) -> Self
+ where
+ F: Future<Output = T> + Send + 'static,
+ {
+ let boxed: Box<dyn Future<Output = T> + Send> = Box::new(future);
+
+ let boxed = Box::into_raw(boxed);
+
+ // SAFETY: Box::into_raw does not return null pointers.
+ let boxed = unsafe { NonNull::new_unchecked(boxed) };
+
+ Self { boxed }
+ }
+
+ /// Replace the future currently stored in this box.
+ ///
+ /// This reallocates if and only if the layout of the provided future is
+ /// different from the layout of the currently stored future.
+ pub(crate) fn set<F>(&mut self, future: F)
+ where
+ F: Future<Output = T> + Send + 'static,
+ {
+ if let Err(future) = self.try_set(future) {
+ *self = Self::new(future);
+ }
+ }
+
+ /// Replace the future currently stored in this box.
+ ///
+ /// This function never reallocates, but returns an error if the provided
+ /// future has a different size or alignment from the currently stored
+ /// future.
+ pub(crate) fn try_set<F>(&mut self, future: F) -> Result<(), F>
+ where
+ F: Future<Output = T> + Send + 'static,
+ {
+ // SAFETY: The pointer is not dangling.
+ let self_layout = {
+ let dyn_future: &(dyn Future<Output = T> + Send) = unsafe { self.boxed.as_ref() };
+ Layout::for_value(dyn_future)
+ };
+
+ if Layout::new::<F>() == self_layout {
+ // SAFETY: We just checked that the layout of F is correct.
+ unsafe {
+ self.set_same_layout(future);
+ }
+
+ Ok(())
+ } else {
+ Err(future)
+ }
+ }
+
+ /// Set the current future.
+ ///
+ /// # Safety
+ ///
+ /// This function requires that the layout of the provided future is the
+ /// same as `self.layout`.
+ unsafe fn set_same_layout<F>(&mut self, future: F)
+ where
+ F: Future<Output = T> + Send + 'static,
+ {
+ // Drop the existing future, catching any panics.
+ let result = panic::catch_unwind(AssertUnwindSafe(|| {
+ ptr::drop_in_place(self.boxed.as_ptr());
+ }));
+
+ // Overwrite the future behind the pointer. This is safe because the
+ // allocation was allocated with the same size and alignment as the type F.
+ let self_ptr: *mut F = self.boxed.as_ptr() as *mut F;
+ ptr::write(self_ptr, future);
+
+ // Update the vtable of self.boxed. The pointer is not null because we
+ // just got it from self.boxed, which is not null.
+ self.boxed = NonNull::new_unchecked(self_ptr);
+
+ // If the old future's destructor panicked, resume unwinding.
+ match result {
+ Ok(()) => {}
+ Err(payload) => {
+ panic::resume_unwind(payload);
+ }
+ }
+ }
+
+ /// Get a pinned reference to the underlying future.
+ pub(crate) fn get_pin(&mut self) -> Pin<&mut (dyn Future<Output = T> + Send)> {
+ // SAFETY: The user of this box cannot move the box, and we do not move it
+ // either.
+ unsafe { Pin::new_unchecked(self.boxed.as_mut()) }
+ }
+
+ /// Poll the future stored inside this box.
+ pub(crate) fn poll(&mut self, cx: &mut Context<'_>) -> Poll<T> {
+ self.get_pin().poll(cx)
+ }
+}
+
+impl<T> Future for ReusableBoxFuture<T> {
+ type Output = T;
+
+ /// Poll the future stored inside this box.
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<T> {
+ Pin::into_inner(self).get_pin().poll(cx)
+ }
+}
+
+// The future stored inside ReusableBoxFuture<T> must be Send.
+unsafe impl<T> Send for ReusableBoxFuture<T> {}
+
+// The only method called on self.boxed is poll, which takes &mut self, so this
+// struct being Sync does not permit any invalid access to the Future, even if
+// the future is not Sync.
+unsafe impl<T> Sync for ReusableBoxFuture<T> {}
+
+// Just like a Pin<Box<dyn Future>> is always Unpin, so is this type.
+impl<T> Unpin for ReusableBoxFuture<T> {}
+
+impl<T> Drop for ReusableBoxFuture<T> {
+ fn drop(&mut self) {
+ unsafe {
+ drop(Box::from_raw(self.boxed.as_ptr()));
+ }
+ }
+}
+
+impl<T> fmt::Debug for ReusableBoxFuture<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ReusableBoxFuture").finish()
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::ReusableBoxFuture;
+ use futures::future::FutureExt;
+ use std::alloc::Layout;
+ use std::future::Future;
+ use std::pin::Pin;
+ use std::task::{Context, Poll};
+
+ #[test]
+ fn test_different_futures() {
+ let fut = async move { 10 };
+ // Not zero sized!
+ assert_eq!(Layout::for_value(&fut).size(), 1);
+
+ let mut b = ReusableBoxFuture::new(fut);
+
+ assert_eq!(b.get_pin().now_or_never(), Some(10));
+
+ b.try_set(async move { 20 })
+ .unwrap_or_else(|_| panic!("incorrect size"));
+
+ assert_eq!(b.get_pin().now_or_never(), Some(20));
+
+ b.try_set(async move { 30 })
+ .unwrap_or_else(|_| panic!("incorrect size"));
+
+ assert_eq!(b.get_pin().now_or_never(), Some(30));
+ }
+
+ #[test]
+ fn test_different_sizes() {
+ let fut1 = async move { 10 };
+ let val = [0u32; 1000];
+ let fut2 = async move { val[0] };
+ let fut3 = ZeroSizedFuture {};
+
+ assert_eq!(Layout::for_value(&fut1).size(), 1);
+ assert_eq!(Layout::for_value(&fut2).size(), 4004);
+ assert_eq!(Layout::for_value(&fut3).size(), 0);
+
+ let mut b = ReusableBoxFuture::new(fut1);
+ assert_eq!(b.get_pin().now_or_never(), Some(10));
+ b.set(fut2);
+ assert_eq!(b.get_pin().now_or_never(), Some(0));
+ b.set(fut3);
+ assert_eq!(b.get_pin().now_or_never(), Some(5));
+ }
+
+ struct ZeroSizedFuture {}
+ impl Future for ZeroSizedFuture {
+ type Output = u32;
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u32> {
+ Poll::Ready(5)
+ }
+ }
+
+ #[test]
+ fn test_zero_sized() {
+ let fut = ZeroSizedFuture {};
+ // Zero sized!
+ assert_eq!(Layout::for_value(&fut).size(), 0);
+
+ let mut b = ReusableBoxFuture::new(fut);
+
+ assert_eq!(b.get_pin().now_or_never(), Some(5));
+ assert_eq!(b.get_pin().now_or_never(), Some(5));
+
+ b.try_set(ZeroSizedFuture {})
+ .unwrap_or_else(|_| panic!("incorrect size"));
+
+ assert_eq!(b.get_pin().now_or_never(), Some(5));
+ assert_eq!(b.get_pin().now_or_never(), Some(5));
+ }
+}
diff --git a/src/signal/unix.rs b/src/signal/unix.rs
index 0de875a..cb1d1cc 100644
--- a/src/signal/unix.rs
+++ b/src/signal/unix.rs
@@ -6,8 +6,8 @@
#![cfg(unix)]
use crate::signal::registry::{globals, EventId, EventInfo, Globals, Init, Storage};
-use crate::sync::mpsc::error::TryRecvError;
-use crate::sync::mpsc::{channel, Receiver};
+use crate::signal::RxFuture;
+use crate::sync::watch;
use libc::c_int;
use mio::net::UnixStream;
@@ -222,7 +222,8 @@ fn action(globals: Pin<&'static Globals>, signal: c_int) {
///
/// This will register the signal handler if it hasn't already been registered,
/// returning any error along the way if that fails.
-fn signal_enable(signal: c_int, handle: Handle) -> io::Result<()> {
+fn signal_enable(signal: SignalKind, handle: Handle) -> io::Result<()> {
+ let signal = signal.0;
if signal < 0 || signal_hook_registry::FORBIDDEN.contains(&signal) {
return Err(Error::new(
ErrorKind::Other,
@@ -325,7 +326,7 @@ fn signal_enable(signal: c_int, handle: Handle) -> io::Result<()> {
#[must_use = "streams do nothing unless polled"]
#[derive(Debug)]
pub struct Signal {
- rx: Receiver<()>,
+ inner: RxFuture,
}
/// Creates a new stream which will receive notifications when the current
@@ -351,21 +352,21 @@ pub struct Signal {
/// * If the signal is one of
/// [`signal_hook::FORBIDDEN`](fn@signal_hook_registry::register#panics)
pub fn signal(kind: SignalKind) -> io::Result<Signal> {
- signal_with_handle(kind, Handle::current())
-}
+ let rx = signal_with_handle(kind, Handle::current())?;
-pub(crate) fn signal_with_handle(kind: SignalKind, handle: Handle) -> io::Result<Signal> {
- let signal = kind.0;
+ Ok(Signal {
+ inner: RxFuture::new(rx),
+ })
+}
+pub(crate) fn signal_with_handle(
+ kind: SignalKind,
+ handle: Handle,
+) -> io::Result<watch::Receiver<()>> {
// Turn the signal delivery on once we are ready for it
- signal_enable(signal, handle)?;
+ signal_enable(kind, handle)?;
- // One wakeup in a queue is enough, no need for us to buffer up any
- // more.
- let (tx, rx) = channel(1);
- globals().register_listener(signal as EventId, tx);
-
- Ok(Signal { rx })
+ Ok(globals().register_listener(kind.0 as EventId))
}
impl Signal {
@@ -393,8 +394,7 @@ impl Signal {
/// }
/// ```
pub async fn recv(&mut self) -> Option<()> {
- use crate::future::poll_fn;
- poll_fn(|cx| self.poll_recv(cx)).await
+ self.inner.recv().await
}
/// Polls to receive the next signal notification event, outside of an
@@ -432,29 +432,19 @@ impl Signal {
/// }
/// ```
pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
- self.rx.poll_recv(cx)
- }
-
- /// Try to receive a signal notification without blocking or registering a waker.
- pub(crate) fn try_recv(&mut self) -> Result<(), TryRecvError> {
- self.rx.try_recv()
+ self.inner.poll_recv(cx)
}
}
// Work around for abstracting streams internally
-pub(crate) trait InternalStream: Unpin {
+pub(crate) trait InternalStream {
fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>>;
- fn try_recv(&mut self) -> Result<(), TryRecvError>;
}
impl InternalStream for Signal {
fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
self.poll_recv(cx)
}
-
- fn try_recv(&mut self) -> Result<(), TryRecvError> {
- self.try_recv()
- }
}
pub(crate) fn ctrl_c() -> io::Result<Signal> {
@@ -467,11 +457,15 @@ mod tests {
#[test]
fn signal_enable_error_on_invalid_input() {
- signal_enable(-1, Handle::default()).unwrap_err();
+ signal_enable(SignalKind::from_raw(-1), Handle::default()).unwrap_err();
}
#[test]
fn signal_enable_error_on_forbidden_input() {
- signal_enable(signal_hook_registry::FORBIDDEN[0], Handle::default()).unwrap_err();
+ signal_enable(
+ SignalKind::from_raw(signal_hook_registry::FORBIDDEN[0]),
+ Handle::default(),
+ )
+ .unwrap_err();
}
}
diff --git a/src/signal/windows.rs b/src/signal/windows.rs
index 43af290..c231d62 100644
--- a/src/signal/windows.rs
+++ b/src/signal/windows.rs
@@ -8,7 +8,7 @@
#![cfg(windows)]
use crate::signal::registry::{globals, EventId, EventInfo, Init, Storage};
-use crate::sync::mpsc::{channel, Receiver};
+use crate::signal::RxFuture;
use std::convert::TryFrom;
use std::io;
@@ -76,22 +76,18 @@ impl Init for OsExtraData {
#[must_use = "streams do nothing unless polled"]
#[derive(Debug)]
pub(crate) struct Event {
- rx: Receiver<()>,
+ inner: RxFuture,
}
impl Event {
fn new(signum: DWORD) -> io::Result<Self> {
global_init()?;
- let (tx, rx) = channel(1);
- globals().register_listener(signum as EventId, tx);
+ let rx = globals().register_listener(signum as EventId);
- Ok(Event { rx })
- }
-
- pub(crate) async fn recv(&mut self) -> Option<()> {
- use crate::future::poll_fn;
- poll_fn(|cx| self.rx.poll_recv(cx)).await
+ Ok(Self {
+ inner: RxFuture::new(rx),
+ })
}
}
@@ -195,7 +191,7 @@ impl CtrlC {
/// }
/// ```
pub async fn recv(&mut self) -> Option<()> {
- self.inner.recv().await
+ self.inner.inner.recv().await
}
/// Polls to receive the next signal notification event, outside of an
@@ -227,7 +223,7 @@ impl CtrlC {
/// }
/// ```
pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
- self.inner.rx.poll_recv(cx)
+ self.inner.inner.poll_recv(cx)
}
}
@@ -267,7 +263,7 @@ impl CtrlBreak {
/// }
/// ```
pub async fn recv(&mut self) -> Option<()> {
- self.inner.recv().await
+ self.inner.inner.recv().await
}
/// Polls to receive the next signal notification event, outside of an
@@ -299,7 +295,7 @@ impl CtrlBreak {
/// }
/// ```
pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<()>> {
- self.inner.rx.poll_recv(cx)
+ self.inner.inner.poll_recv(cx)
}
}
diff --git a/src/sync/barrier.rs b/src/sync/barrier.rs
index fddb3a5..a8b291f 100644
--- a/src/sync/barrier.rs
+++ b/src/sync/barrier.rs
@@ -8,8 +8,6 @@ use std::sync::Mutex;
/// # #[tokio::main]
/// # async fn main() {
/// use tokio::sync::Barrier;
-///
-/// use futures::future::join_all;
/// use std::sync::Arc;
///
/// let mut handles = Vec::with_capacity(10);
@@ -18,17 +16,25 @@ use std::sync::Mutex;
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
-/// handles.push(async move {
+/// handles.push(tokio::spawn(async move {
/// println!("before wait");
-/// let wr = c.wait().await;
+/// let wait_result = c.wait().await;
/// println!("after wait");
-/// wr
-/// });
+/// wait_result
+/// }));
+/// }
+///
+/// // Will not resolve until all "after wait" messages have been printed
+/// let mut num_leaders = 0;
+/// for handle in handles {
+/// let wait_result = handle.await.unwrap();
+/// if wait_result.is_leader() {
+/// num_leaders += 1;
+/// }
/// }
-/// // Will not resolve until all "before wait" messages have been printed
-/// let wrs = join_all(handles).await;
+///
/// // Exactly one barrier will resolve as the "leader"
-/// assert_eq!(wrs.into_iter().filter(|wr| wr.is_leader()).count(), 1);
+/// assert_eq!(num_leaders, 1);
/// # }
/// ```
#[derive(Debug)]
diff --git a/src/sync/batch_semaphore.rs b/src/sync/batch_semaphore.rs
index 803f2a1..a0bf5ef 100644
--- a/src/sync/batch_semaphore.rs
+++ b/src/sync/batch_semaphore.rs
@@ -271,6 +271,7 @@ impl Semaphore {
Self::MAX_PERMITS
);
let prev = self.permits.fetch_add(rem << Self::PERMIT_SHIFT, Release);
+ let prev = prev >> Self::PERMIT_SHIFT;
assert!(
prev + permits <= Self::MAX_PERMITS,
"number of added permits ({}) would overflow MAX_PERMITS ({})",
diff --git a/src/sync/mod.rs b/src/sync/mod.rs
index a953c66..d89a9dd 100644
--- a/src/sync/mod.rs
+++ b/src/sync/mod.rs
@@ -450,19 +450,26 @@ cfg_sync! {
pub use semaphore::{Semaphore, SemaphorePermit, OwnedSemaphorePermit};
mod rwlock;
- pub use rwlock::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+ pub use rwlock::RwLock;
+ pub use rwlock::owned_read_guard::OwnedRwLockReadGuard;
+ pub use rwlock::owned_write_guard::OwnedRwLockWriteGuard;
+ pub use rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
+ pub use rwlock::read_guard::RwLockReadGuard;
+ pub use rwlock::write_guard::RwLockWriteGuard;
+ pub use rwlock::write_guard_mapped::RwLockMappedWriteGuard;
mod task;
pub(crate) use task::AtomicWaker;
+ mod once_cell;
+ pub use self::once_cell::{OnceCell, SetError};
+
pub mod watch;
}
cfg_not_sync! {
- #[cfg(any(feature = "fs", feature = "signal", all(unix, feature = "process")))]
- pub(crate) mod batch_semaphore;
-
cfg_fs! {
+ pub(crate) mod batch_semaphore;
mod mutex;
pub(crate) use mutex::Mutex;
}
@@ -470,20 +477,16 @@ cfg_not_sync! {
#[cfg(any(feature = "rt", feature = "signal", all(unix, feature = "process")))]
pub(crate) mod notify;
+ #[cfg(any(feature = "rt", all(windows, feature = "process")))]
+ pub(crate) mod oneshot;
+
cfg_atomic_waker_impl! {
mod task;
pub(crate) use task::AtomicWaker;
}
- #[cfg(any(
- feature = "rt",
- feature = "process",
- feature = "signal"))]
- pub(crate) mod oneshot;
-
- cfg_signal_internal! {
- pub(crate) mod mpsc;
- }
+ #[cfg(any(feature = "signal", all(unix, feature = "process")))]
+ pub(crate) mod watch;
}
/// Unit tests
diff --git a/src/sync/mpsc/block.rs b/src/sync/mpsc/block.rs
index 6bef794..1c9ab14 100644
--- a/src/sync/mpsc/block.rs
+++ b/src/sync/mpsc/block.rs
@@ -186,7 +186,7 @@ impl<T> Block<T> {
///
/// * The block will no longer be accessed by any sender.
pub(crate) unsafe fn tx_release(&self, tail_position: usize) {
- // Track the observed tail_position. Any sender targetting a greater
+ // Track the observed tail_position. Any sender targeting a greater
// tail_position is guaranteed to not access this block.
self.observed_tail_position
.with_mut(|ptr| *ptr = tail_position);
@@ -350,7 +350,7 @@ impl<T> Block<T> {
}
}
-/// Returns `true` if the specificed slot has a value ready to be consumed.
+/// Returns `true` if the specified slot has a value ready to be consumed.
fn is_ready(bits: usize, slot: usize) -> bool {
let mask = 1 << slot;
mask == mask & bits
diff --git a/src/sync/mpsc/bounded.rs b/src/sync/mpsc/bounded.rs
index dfe4a74..1f670bf 100644
--- a/src/sync/mpsc/bounded.rs
+++ b/src/sync/mpsc/bounded.rs
@@ -1,8 +1,5 @@
use crate::sync::batch_semaphore::{self as semaphore, TryAcquireError};
use crate::sync::mpsc::chan;
-#[cfg(unix)]
-#[cfg(any(feature = "signal", feature = "process"))]
-use crate::sync::mpsc::error::TryRecvError;
use crate::sync::mpsc::error::{SendError, TrySendError};
cfg_time! {
@@ -16,6 +13,11 @@ use std::task::{Context, Poll};
/// Send values to the associated `Receiver`.
///
/// Instances are created by the [`channel`](channel) function.
+///
+/// To use the `Sender` in a poll function, you can use the [`PollSender`]
+/// utility.
+///
+/// [`PollSender`]: https://docs.rs/tokio-util/0.6/tokio_util/sync/struct.PollSender.html
pub struct Sender<T> {
chan: chan::Tx<T, Semaphore>,
}
@@ -219,23 +221,6 @@ impl<T> Receiver<T> {
crate::future::block_on(self.recv())
}
- /// Attempts to return a pending value on this receiver without blocking.
- ///
- /// This method will never block the caller in order to wait for data to
- /// become available. Instead, this will always return immediately with
- /// a possible option of pending data on the channel.
- ///
- /// This is useful for a flavor of "optimistic check" before deciding to
- /// block on a receiver.
- ///
- /// Compared with recv, this function has two failure cases instead of
- /// one (one for disconnection, one for an empty buffer).
- #[cfg(unix)]
- #[cfg(any(feature = "signal", feature = "process"))]
- pub(crate) fn try_recv(&mut self) -> Result<T, TryRecvError> {
- self.chan.try_recv()
- }
-
/// Closes the receiving half of a channel without dropping it.
///
/// This prevents any further messages from being sent on the channel while
@@ -698,6 +683,55 @@ impl<T> Sender<T> {
Ok(Permit { chan: &self.chan })
}
+
+ /// Returns `true` if senders belong to the same channel.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let (tx, rx) = tokio::sync::mpsc::channel::<()>(1);
+ /// let tx2 = tx.clone();
+ /// assert!(tx.same_channel(&tx2));
+ ///
+ /// let (tx3, rx3) = tokio::sync::mpsc::channel::<()>(1);
+ /// assert!(!tx3.same_channel(&tx2));
+ /// ```
+ pub fn same_channel(&self, other: &Self) -> bool {
+ self.chan.same_channel(&other.chan)
+ }
+
+ /// Returns the current capacity of the channel.
+ ///
+ /// The capacity goes down when sending a value by calling [`send`] or by reserving capacity
+ /// with [`reserve`]. The capacity goes up when values are received by the [`Receiver`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::mpsc;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let (tx, mut rx) = mpsc::channel::<()>(5);
+ ///
+ /// assert_eq!(tx.capacity(), 5);
+ ///
+ /// // Making a reservation drops the capacity by one.
+ /// let permit = tx.reserve().await.unwrap();
+ /// assert_eq!(tx.capacity(), 4);
+ ///
+ /// // Sending and receiving a value increases the capacity by one.
+ /// permit.send(());
+ /// rx.recv().await.unwrap();
+ /// assert_eq!(tx.capacity(), 5);
+ /// }
+ /// ```
+ ///
+ /// [`send`]: Sender::send
+ /// [`reserve`]: Sender::reserve
+ pub fn capacity(&self) -> usize {
+ self.chan.semaphore().0.available_permits()
+ }
}
impl<T> Clone for Sender<T> {
diff --git a/src/sync/mpsc/chan.rs b/src/sync/mpsc/chan.rs
index f34eb0f..554d022 100644
--- a/src/sync/mpsc/chan.rs
+++ b/src/sync/mpsc/chan.rs
@@ -139,6 +139,11 @@ impl<T, S> Tx<T, S> {
pub(crate) fn wake_rx(&self) {
self.inner.rx_waker.wake();
}
+
+ /// Returns `true` if senders belong to the same channel.
+ pub(crate) fn same_channel(&self, other: &Self) -> bool {
+ Arc::ptr_eq(&self.inner, &other.inner)
+ }
}
impl<T, S: Semaphore> Tx<T, S> {
@@ -260,30 +265,6 @@ impl<T, S: Semaphore> Rx<T, S> {
}
}
-feature! {
- #![all(unix, any(feature = "signal", feature = "process"))]
-
- use crate::sync::mpsc::error::TryRecvError;
-
- impl<T, S: Semaphore> Rx<T, S> {
- /// Receives the next value without blocking
- pub(crate) fn try_recv(&mut self) -> Result<T, TryRecvError> {
- use super::block::Read::*;
- self.inner.rx_fields.with_mut(|rx_fields_ptr| {
- let rx_fields = unsafe { &mut *rx_fields_ptr };
- match rx_fields.list.pop(&self.inner.tx) {
- Some(Value(value)) => {
- self.inner.semaphore.add_permit();
- Ok(value)
- }
- Some(Closed) => Err(TryRecvError::Closed),
- None => Err(TryRecvError::Empty),
- }
- })
- }
- }
-}
-
impl<T, S: Semaphore> Drop for Rx<T, S> {
fn drop(&mut self) {
use super::block::Read::Value;
diff --git a/src/sync/mpsc/error.rs b/src/sync/mpsc/error.rs
index d23255b..a2d2824 100644
--- a/src/sync/mpsc/error.rs
+++ b/src/sync/mpsc/error.rs
@@ -65,39 +65,6 @@ impl fmt::Display for RecvError {
impl Error for RecvError {}
-// ===== TryRecvError =====
-
-feature! {
- #![all(unix, any(feature = "signal", feature = "process"))]
-
- /// This enumeration is the list of the possible reasons that try_recv
- /// could not return data when called.
- #[derive(Debug, PartialEq)]
- pub(crate) enum TryRecvError {
- /// This channel is currently empty, but the Sender(s) have not yet
- /// disconnected, so data may yet become available.
- Empty,
- /// The channel's sending half has been closed, and there will
- /// never be any more data received on it.
- Closed,
- }
-
- impl fmt::Display for TryRecvError {
- fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(
- fmt,
- "{}",
- match self {
- TryRecvError::Empty => "channel empty",
- TryRecvError::Closed => "channel closed",
- }
- )
- }
- }
-
- impl Error for TryRecvError {}
-}
-
cfg_time! {
// ===== SendTimeoutError =====
diff --git a/src/sync/mpsc/unbounded.rs b/src/sync/mpsc/unbounded.rs
index 29a0a29..ffdb34c 100644
--- a/src/sync/mpsc/unbounded.rs
+++ b/src/sync/mpsc/unbounded.rs
@@ -291,4 +291,20 @@ impl<T> UnboundedSender<T> {
pub fn is_closed(&self) -> bool {
self.chan.is_closed()
}
+
+ /// Returns `true` if senders belong to the same channel.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<()>();
+ /// let tx2 = tx.clone();
+ /// assert!(tx.same_channel(&tx2));
+ ///
+ /// let (tx3, rx3) = tokio::sync::mpsc::unbounded_channel::<()>();
+ /// assert!(!tx3.same_channel(&tx2));
+ /// ```
+ pub fn same_channel(&self, other: &Self) -> bool {
+ self.chan.same_channel(&other.chan)
+ }
}
diff --git a/src/sync/mutex.rs b/src/sync/mutex.rs
index 0283336..0a118e7 100644
--- a/src/sync/mutex.rs
+++ b/src/sync/mutex.rs
@@ -10,23 +10,28 @@ use std::sync::Arc;
/// An asynchronous `Mutex`-like type.
///
-/// This type acts similarly to an asynchronous [`std::sync::Mutex`], with one
-/// major difference: [`lock`] does not block and the lock guard can be held
-/// across await points.
+/// This type acts similarly to [`std::sync::Mutex`], with two major
+/// differences: [`lock`] is an async method so does not block, and the lock
+/// guard is designed to be held across `.await` points.
///
/// # Which kind of mutex should you use?
///
/// Contrary to popular belief, it is ok and often preferred to use the ordinary
-/// [`Mutex`][std] from the standard library in asynchronous code. This section
-/// will help you decide on which kind of mutex you should use.
+/// [`Mutex`][std] from the standard library in asynchronous code.
///
-/// The primary use case of the async mutex is to provide shared mutable access
-/// to IO resources such as a database connection. If the data stored behind the
-/// mutex is just data, it is often better to use a blocking mutex such as the
-/// one in the standard library or [`parking_lot`]. This is because the feature
-/// that the async mutex offers over the blocking mutex is that it is possible
-/// to keep the mutex locked across an `.await` point, which is rarely necessary
-/// for data.
+/// The feature that the async mutex offers over the blocking mutex is the
+/// ability to keep it locked across an `.await` point. This makes the async
+/// mutex more expensive than the blocking mutex, so the blocking mutex should
+/// be preferred in the cases where it can be used. The primary use case for the
+/// async mutex is to provide shared mutable access to IO resources such as a
+/// database connection. If the value behind the mutex is just data, it's
+/// usually appropriate to use a blocking mutex such as the one in the standard
+/// library or [`parking_lot`].
+///
+/// Note that, although the compiler will not prevent the std `Mutex` from holding
+/// its guard across `.await` points in situations where the task is not movable
+/// between threads, this virtually never leads to correct concurrent code in
+/// practice as it can easily lead to deadlocks.
///
/// A common pattern is to wrap the `Arc<Mutex<...>>` in a struct that provides
/// non-async methods for performing operations on the data within, and only
@@ -71,13 +76,13 @@ use std::sync::Arc;
/// async fn main() {
/// let count = Arc::new(Mutex::new(0));
///
-/// for _ in 0..5 {
+/// for i in 0..5 {
/// let my_count = Arc::clone(&count);
/// tokio::spawn(async move {
-/// for _ in 0..10 {
+/// for j in 0..10 {
/// let mut lock = my_count.lock().await;
/// *lock += 1;
-/// println!("{}", lock);
+/// println!("{} {} {}", i, j, lock);
/// }
/// });
/// }
@@ -100,9 +105,10 @@ use std::sync::Arc;
/// Tokio's Mutex works in a simple FIFO (first in, first out) style where all
/// calls to [`lock`] complete in the order they were performed. In that way the
/// Mutex is "fair" and predictable in how it distributes the locks to inner
-/// data. This is why the output of the program above is an in-order count to
-/// 50. Locks are released and reacquired after every iteration, so basically,
+/// data. Locks are released and reacquired after every iteration, so basically,
/// each thread goes to the back of the line after it increments the value once.
+/// Note that there's some unpredictability to the timing between when the
+/// threads are started, but once they are going they alternate predictably.
/// Finally, since there is only a single valid lock at any given time, there is
/// no possibility of a race condition when mutating the inner value.
///
@@ -122,7 +128,8 @@ pub struct Mutex<T: ?Sized> {
c: UnsafeCell<T>,
}
-/// A handle to a held `Mutex`.
+/// A handle to a held `Mutex`. The guard can be held across any `.await` point
+/// as it is [`Send`].
///
/// As long as you have this guard, you have exclusive access to the underlying
/// `T`. The guard internally borrows the `Mutex`, so the mutex will not be
diff --git a/src/sync/notify.rs b/src/sync/notify.rs
index f39f92f..2d30da9 100644
--- a/src/sync/notify.rs
+++ b/src/sync/notify.rs
@@ -312,6 +312,8 @@ impl Notify {
/// notify.notify_one();
/// }
/// ```
+ // Alias for old name in 0.x
+ #[cfg_attr(docsrs, doc(alias = "notify"))]
pub fn notify_one(&self) {
// Load the current state
let mut curr = self.state.load(SeqCst);
@@ -349,8 +351,8 @@ impl Notify {
/// Notifies all waiting tasks
///
/// If a task is currently waiting, that task is notified. Unlike with
- /// `notify()`, no permit is stored to be used by the next call to
- /// [`notified().await`]. The purpose of this method is to notify all
+ /// `notify_one()`, no permit is stored to be used by the next call to
+ /// `notified().await`. The purpose of this method is to notify all
/// already registered waiters. Registering for notification is done by
/// acquiring an instance of the `Notified` future via calling `notified()`.
///
@@ -679,35 +681,17 @@ impl Drop for Notified<'_> {
let mut waiters = notify.waiters.lock();
let mut notify_state = notify.state.load(SeqCst);
- // `Notify.state` may be in any of the three states (Empty, Waiting,
- // Notified). It doesn't actually matter what the atomic is set to
- // at this point. We hold the lock and will ensure the atomic is in
- // the correct state once the lock is dropped.
- //
- // Because the atomic state is not checked, at first glance, it may
- // seem like this routine does not handle the case where the
- // receiver is notified but has not yet observed the notification.
- // If this happens, no matter how many notifications happen between
- // this receiver being notified and the receive future dropping, all
- // we need to do is ensure that one notification is returned back to
- // the `Notify`. This is done by calling `notify_locked` if `self`
- // has the `notified` flag set.
-
- // remove the entry from the list
+ // remove the entry from the list (if not already removed)
//
// safety: the waiter is only added to `waiters` by virtue of it
// being the only `LinkedList` available to the type.
unsafe { waiters.remove(NonNull::new_unchecked(waiter.get())) };
if waiters.is_empty() {
- notify_state = set_state(notify_state, EMPTY);
- // If the state *should* be `NOTIFIED`, the call to
- // `notify_locked` below will end up doing the
- // `store(NOTIFIED)`. If a concurrent receiver races and
- // observes the incorrect `EMPTY` state, it will then obtain the
- // lock and block until `notify.state` is in the correct final
- // state.
- notify.state.store(notify_state, SeqCst);
+ if let WAITING = get_state(notify_state) {
+ notify_state = set_state(notify_state, EMPTY);
+ notify.state.store(notify_state, SeqCst);
+ }
}
// See if the node was notified but not received. In this case, if
diff --git a/src/sync/once_cell.rs b/src/sync/once_cell.rs
new file mode 100644
index 0000000..fa9b1f1
--- /dev/null
+++ b/src/sync/once_cell.rs
@@ -0,0 +1,400 @@
+use super::Semaphore;
+use crate::loom::cell::UnsafeCell;
+use std::error::Error;
+use std::fmt;
+use std::future::Future;
+use std::mem::MaybeUninit;
+use std::ops::Drop;
+use std::ptr;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+/// A thread-safe cell which can be written to only once.
+///
+/// Provides the functionality to either set the value, in case `OnceCell`
+/// is uninitialized, or get the already initialized value by using an async
+/// function via [`OnceCell::get_or_init`].
+///
+/// [`OnceCell::get_or_init`]: crate::sync::OnceCell::get_or_init
+///
+/// # Examples
+/// ```
+/// use tokio::sync::OnceCell;
+///
+/// async fn some_computation() -> u32 {
+/// 1 + 1
+/// }
+///
+/// static ONCE: OnceCell<u32> = OnceCell::const_new();
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let result1 = ONCE.get_or_init(some_computation).await;
+/// assert_eq!(*result1, 2);
+/// }
+/// ```
+pub struct OnceCell<T> {
+ value_set: AtomicBool,
+ value: UnsafeCell<MaybeUninit<T>>,
+ semaphore: Semaphore,
+}
+
+impl<T> Default for OnceCell<T> {
+ fn default() -> OnceCell<T> {
+ OnceCell::new()
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_struct("OnceCell")
+ .field("value", &self.get())
+ .finish()
+ }
+}
+
+impl<T: Clone> Clone for OnceCell<T> {
+ fn clone(&self) -> OnceCell<T> {
+ OnceCell::new_with(self.get().cloned())
+ }
+}
+
+impl<T: PartialEq> PartialEq for OnceCell<T> {
+ fn eq(&self, other: &OnceCell<T>) -> bool {
+ self.get() == other.get()
+ }
+}
+
+impl<T: Eq> Eq for OnceCell<T> {}
+
+impl<T> Drop for OnceCell<T> {
+ fn drop(&mut self) {
+ if self.initialized() {
+ unsafe {
+ self.value
+ .with_mut(|ptr| ptr::drop_in_place((&mut *ptr).as_mut_ptr()));
+ };
+ }
+ }
+}
+
+impl<T> OnceCell<T> {
+ /// Creates a new uninitialized OnceCell instance.
+ pub fn new() -> Self {
+ OnceCell {
+ value_set: AtomicBool::new(false),
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ semaphore: Semaphore::new(1),
+ }
+ }
+
+ /// Creates a new initialized OnceCell instance if `value` is `Some`, otherwise
+ /// has the same functionality as [`OnceCell::new`].
+ ///
+ /// [`OnceCell::new`]: crate::sync::OnceCell::new
+ pub fn new_with(value: Option<T>) -> Self {
+ if let Some(v) = value {
+ let semaphore = Semaphore::new(0);
+ semaphore.close();
+ OnceCell {
+ value_set: AtomicBool::new(true),
+ value: UnsafeCell::new(MaybeUninit::new(v)),
+ semaphore,
+ }
+ } else {
+ OnceCell::new()
+ }
+ }
+
+ /// Creates a new uninitialized OnceCell instance.
+ #[cfg(all(feature = "parking_lot", not(all(loom, test)),))]
+ #[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))]
+ pub const fn const_new() -> Self {
+ OnceCell {
+ value_set: AtomicBool::new(false),
+ value: UnsafeCell::new(MaybeUninit::uninit()),
+ semaphore: Semaphore::const_new(1),
+ }
+ }
+
+ /// Whether the value of the OnceCell is set or not.
+ pub fn initialized(&self) -> bool {
+ self.value_set.load(Ordering::Acquire)
+ }
+
+ // SAFETY: safe to call only once self.initialized() is true
+ unsafe fn get_unchecked(&self) -> &T {
+ &*self.value.with(|ptr| (*ptr).as_ptr())
+ }
+
+    // SAFETY: safe to call only once self.initialized() is true. Safe because
+    // of the mutable reference.
+ unsafe fn get_unchecked_mut(&mut self) -> &mut T {
+ &mut *self.value.with_mut(|ptr| (*ptr).as_mut_ptr())
+ }
+
+ // SAFETY: safe to call only once a permit on the semaphore has been
+ // acquired
+ unsafe fn set_value(&self, value: T) {
+ self.value.with_mut(|ptr| (*ptr).as_mut_ptr().write(value));
+ self.value_set.store(true, Ordering::Release);
+ self.semaphore.close();
+ }
+
+ /// Tries to get a reference to the value of the OnceCell.
+ ///
+ /// Returns None if the value of the OnceCell hasn't previously been initialized.
+ pub fn get(&self) -> Option<&T> {
+ if self.initialized() {
+ Some(unsafe { self.get_unchecked() })
+ } else {
+ None
+ }
+ }
+
+ /// Tries to return a mutable reference to the value of the cell.
+ ///
+ /// Returns None if the cell hasn't previously been initialized.
+ pub fn get_mut(&mut self) -> Option<&mut T> {
+ if self.initialized() {
+ Some(unsafe { self.get_unchecked_mut() })
+ } else {
+ None
+ }
+ }
+
+ /// Sets the value of the OnceCell to the argument value.
+ ///
+ /// If the value of the OnceCell was already set prior to this call
+ /// then [`SetError::AlreadyInitializedError`] is returned. If another thread
+ /// is initializing the cell while this method is called,
+ /// [`SetError::InitializingError`] is returned. In order to wait
+ /// for an ongoing initialization to finish, call
+ /// [`OnceCell::get_or_init`] instead.
+ ///
+ /// [`SetError::AlreadyInitializedError`]: crate::sync::SetError::AlreadyInitializedError
+ /// [`SetError::InitializingError`]: crate::sync::SetError::InitializingError
+    /// [`OnceCell::get_or_init`]: crate::sync::OnceCell::get_or_init
+ pub fn set(&self, value: T) -> Result<(), SetError<T>> {
+ if !self.initialized() {
+ // Another thread might be initializing the cell, in which case `try_acquire` will
+ // return an error
+ match self.semaphore.try_acquire() {
+ Ok(_permit) => {
+ if !self.initialized() {
+ // SAFETY: There is only one permit on the semaphore, hence only one
+ // mutable reference is created
+ unsafe { self.set_value(value) };
+
+ return Ok(());
+ } else {
+ unreachable!(
+ "acquired the permit after OnceCell value was already initialized."
+ );
+ }
+ }
+ _ => {
+ // Couldn't acquire the permit, look if initializing process is already completed
+ if !self.initialized() {
+ return Err(SetError::InitializingError(value));
+ }
+ }
+ }
+ }
+
+ Err(SetError::AlreadyInitializedError(value))
+ }
+
+ /// Tries to initialize the value of the OnceCell using the async function `f`.
+ /// If the value of the OnceCell was already initialized prior to this call,
+ /// a reference to that initialized value is returned. If some other thread
+ /// initiated the initialization prior to this call and the initialization
+ /// hasn't completed, this call waits until the initialization is finished.
+ ///
+ /// This will deadlock if `f` tries to initialize the cell itself.
+ pub async fn get_or_init<F, Fut>(&self, f: F) -> &T
+ where
+ F: FnOnce() -> Fut,
+ Fut: Future<Output = T>,
+ {
+ if self.initialized() {
+ // SAFETY: once the value is initialized, no mutable references are given out, so
+ // we can give out arbitrarily many immutable references
+ unsafe { self.get_unchecked() }
+ } else {
+ // After acquire().await we have either acquired a permit while self.value
+ // is still uninitialized, or the current thread is awoken after another thread
+            // has initialized the value and closed the semaphore, in which case self.initialized
+ // is true and we don't set the value here
+ match self.semaphore.acquire().await {
+ Ok(_permit) => {
+ if !self.initialized() {
+ // If `f()` panics or `select!` is called, this `get_or_init` call
+ // is aborted and the semaphore permit is dropped.
+ let value = f().await;
+
+ // SAFETY: There is only one permit on the semaphore, hence only one
+ // mutable reference is created
+ unsafe { self.set_value(value) };
+
+ // SAFETY: once the value is initialized, no mutable references are given out, so
+ // we can give out arbitrarily many immutable references
+ unsafe { self.get_unchecked() }
+ } else {
+ unreachable!("acquired semaphore after value was already initialized.");
+ }
+ }
+ Err(_) => {
+ if self.initialized() {
+ // SAFETY: once the value is initialized, no mutable references are given out, so
+ // we can give out arbitrarily many immutable references
+ unsafe { self.get_unchecked() }
+ } else {
+ unreachable!(
+ "Semaphore closed, but the OnceCell has not been initialized."
+ );
+ }
+ }
+ }
+ }
+ }
+
+ /// Tries to initialize the value of the OnceCell using the async function `f`.
+ /// If the value of the OnceCell was already initialized prior to this call,
+ /// a reference to that initialized value is returned. If some other thread
+ /// initiated the initialization prior to this call and the initialization
+ /// hasn't completed, this call waits until the initialization is finished.
+ /// If the function argument `f` returns an error, `get_or_try_init`
+ /// returns that error, otherwise the result of `f` will be stored in the cell.
+ ///
+ /// This will deadlock if `f` tries to initialize the cell itself.
+ pub async fn get_or_try_init<E, F, Fut>(&self, f: F) -> Result<&T, E>
+ where
+ F: FnOnce() -> Fut,
+ Fut: Future<Output = Result<T, E>>,
+ {
+ if self.initialized() {
+ // SAFETY: once the value is initialized, no mutable references are given out, so
+ // we can give out arbitrarily many immutable references
+ unsafe { Ok(self.get_unchecked()) }
+ } else {
+ // After acquire().await we have either acquired a permit while self.value
+ // is still uninitialized, or the current thread is awoken after another thread
+            // has initialized the value and closed the semaphore, in which case self.initialized
+ // is true and we don't set the value here
+ match self.semaphore.acquire().await {
+ Ok(_permit) => {
+ if !self.initialized() {
+ // If `f()` panics or `select!` is called, this `get_or_try_init` call
+ // is aborted and the semaphore permit is dropped.
+ let value = f().await;
+
+ match value {
+ Ok(value) => {
+ // SAFETY: There is only one permit on the semaphore, hence only one
+ // mutable reference is created
+ unsafe { self.set_value(value) };
+
+ // SAFETY: once the value is initialized, no mutable references are given out, so
+ // we can give out arbitrarily many immutable references
+ unsafe { Ok(self.get_unchecked()) }
+ }
+ Err(e) => Err(e),
+ }
+ } else {
+ unreachable!("acquired semaphore after value was already initialized.");
+ }
+ }
+ Err(_) => {
+ if self.initialized() {
+ // SAFETY: once the value is initialized, no mutable references are given out, so
+ // we can give out arbitrarily many immutable references
+ unsafe { Ok(self.get_unchecked()) }
+ } else {
+ unreachable!(
+ "Semaphore closed, but the OnceCell has not been initialized."
+ );
+ }
+ }
+ }
+ }
+ }
+
+ /// Moves the value out of the cell, destroying the cell in the process.
+ ///
+ /// Returns `None` if the cell is uninitialized.
+ pub fn into_inner(mut self) -> Option<T> {
+ if self.initialized() {
+ // Set to uninitialized for the destructor of `OnceCell` to work properly
+ *self.value_set.get_mut() = false;
+ Some(unsafe { self.value.with(|ptr| ptr::read(ptr).assume_init()) })
+ } else {
+ None
+ }
+ }
+
+ /// Takes ownership of the current value, leaving the cell uninitialized.
+ ///
+ /// Returns `None` if the cell is uninitialized.
+ pub fn take(&mut self) -> Option<T> {
+ std::mem::take(self).into_inner()
+ }
+}
+
+// Since `get` gives us access to immutable references of the
+// OnceCell, OnceCell can only be Sync if T is Sync, otherwise
+// OnceCell would allow sharing references of !Sync values across
+// threads. We need T to be Send in order for OnceCell to be Sync
+// because we can use `set` on `&OnceCell<T>` to send
+// values (of type T) across threads.
+unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
+
+// Access to OnceCell's value is guarded by the semaphore permit
+// and atomic operations on `value_set`, so as long as T itself is Send
+// it's safe to send it to another thread
+unsafe impl<T: Send> Send for OnceCell<T> {}
+
+/// Errors that can be returned from [`OnceCell::set`]
+///
+/// [`OnceCell::set`]: crate::sync::OnceCell::set
+#[derive(Debug, PartialEq)]
+pub enum SetError<T> {
+ /// Error resulting from [`OnceCell::set`] calls if the cell was previously initialized.
+ ///
+ /// [`OnceCell::set`]: crate::sync::OnceCell::set
+ AlreadyInitializedError(T),
+
+ /// Error resulting from [`OnceCell::set`] calls when the cell is currently being
+    /// initialized during the calls to that method.
+ ///
+ /// [`OnceCell::set`]: crate::sync::OnceCell::set
+ InitializingError(T),
+}
+
+impl<T> fmt::Display for SetError<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ SetError::AlreadyInitializedError(_) => write!(f, "AlreadyInitializedError"),
+ SetError::InitializingError(_) => write!(f, "InitializingError"),
+ }
+ }
+}
+
+impl<T: fmt::Debug> Error for SetError<T> {}
+
+impl<T> SetError<T> {
+ /// Whether `SetError` is `SetError::AlreadyInitializedError`.
+ pub fn is_already_init_err(&self) -> bool {
+ match self {
+ SetError::AlreadyInitializedError(_) => true,
+ SetError::InitializingError(_) => false,
+ }
+ }
+
+ /// Whether `SetError` is `SetError::InitializingError`
+ pub fn is_initializing_err(&self) -> bool {
+ match self {
+ SetError::AlreadyInitializedError(_) => false,
+ SetError::InitializingError(_) => true,
+ }
+ }
+}
diff --git a/src/sync/oneshot.rs b/src/sync/oneshot.rs
index 20d39dc..0df6037 100644
--- a/src/sync/oneshot.rs
+++ b/src/sync/oneshot.rs
@@ -1,6 +1,56 @@
#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
-//! A channel for sending a single message between asynchronous tasks.
+//! A one-shot channel is used for sending a single message between
+//! asynchronous tasks. The [`channel`] function is used to create a
+//! [`Sender`] and [`Receiver`] handle pair that form the channel.
+//!
+//! The `Sender` handle is used by the producer to send the value.
+//! The `Receiver` handle is used by the consumer to receive the value.
+//!
+//! Each handle can be used on separate tasks.
+//!
+//! # Examples
+//!
+//! ```
+//! use tokio::sync::oneshot;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (tx, rx) = oneshot::channel();
+//!
+//! tokio::spawn(async move {
+//! if let Err(_) = tx.send(3) {
+//! println!("the receiver dropped");
+//! }
+//! });
+//!
+//! match rx.await {
+//! Ok(v) => println!("got = {:?}", v),
+//! Err(_) => println!("the sender dropped"),
+//! }
+//! }
+//! ```
+//!
+//! If the sender is dropped without sending, the receiver will fail with
+//! [`error::RecvError`]:
+//!
+//! ```
+//! use tokio::sync::oneshot;
+//!
+//! #[tokio::main]
+//! async fn main() {
+//! let (tx, rx) = oneshot::channel::<u32>();
+//!
+//! tokio::spawn(async move {
+//! drop(tx);
+//! });
+//!
+//! match rx.await {
+//! Ok(_) => panic!("This doesn't happen"),
+//! Err(_) => println!("the sender dropped"),
+//! }
+//! }
+//! ```
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicUsize;
@@ -14,17 +64,62 @@ use std::sync::atomic::Ordering::{self, AcqRel, Acquire};
use std::task::Poll::{Pending, Ready};
use std::task::{Context, Poll, Waker};
-/// Sends a value to the associated `Receiver`.
+/// Sends a value to the associated [`Receiver`].
///
-/// Instances are created by the [`channel`](fn@channel) function.
+/// A pair of both a [`Sender`] and a [`Receiver`] are created by the
+/// [`channel`](fn@channel) function.
#[derive(Debug)]
pub struct Sender<T> {
inner: Option<Arc<Inner<T>>>,
}
-/// Receive a value from the associated `Sender`.
+/// Receive a value from the associated [`Sender`].
///
-/// Instances are created by the [`channel`](fn@channel) function.
+/// A pair of both a [`Sender`] and a [`Receiver`] are created by the
+/// [`channel`](fn@channel) function.
+///
+/// # Examples
+///
+/// ```
+/// use tokio::sync::oneshot;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx, rx) = oneshot::channel();
+///
+/// tokio::spawn(async move {
+/// if let Err(_) = tx.send(3) {
+/// println!("the receiver dropped");
+/// }
+/// });
+///
+/// match rx.await {
+/// Ok(v) => println!("got = {:?}", v),
+/// Err(_) => println!("the sender dropped"),
+/// }
+/// }
+/// ```
+///
+/// If the sender is dropped without sending, the receiver will fail with
+/// [`error::RecvError`]:
+///
+/// ```
+/// use tokio::sync::oneshot;
+///
+/// #[tokio::main]
+/// async fn main() {
+/// let (tx, rx) = oneshot::channel::<u32>();
+///
+/// tokio::spawn(async move {
+/// drop(tx);
+/// });
+///
+/// match rx.await {
+/// Ok(_) => panic!("This doesn't happen"),
+/// Err(_) => println!("the sender dropped"),
+/// }
+/// }
+/// ```
#[derive(Debug)]
pub struct Receiver<T> {
inner: Option<Arc<Inner<T>>>,
@@ -443,6 +538,9 @@ impl<T> Receiver<T> {
/// This function is useful to perform a graceful shutdown and ensure that a
/// value will not be sent into the channel and never received.
///
+ /// `close` is no-op if a message is already received or the channel
+ /// is already closed.
+ ///
/// [`Sender`]: Sender
/// [`try_recv`]: Receiver::try_recv
///
@@ -490,8 +588,9 @@ impl<T> Receiver<T> {
/// }
/// ```
pub fn close(&mut self) {
- let inner = self.inner.as_ref().unwrap();
- inner.close();
+ if let Some(inner) = self.inner.as_ref() {
+ inner.close();
+ }
}
/// Attempts to receive a value.
@@ -573,7 +672,7 @@ impl<T> Receiver<T> {
return Err(TryRecvError::Empty);
}
} else {
- panic!("called after complete");
+ Err(TryRecvError::Closed)
};
self.inner = None;
diff --git a/src/sync/rwlock.rs b/src/sync/rwlock.rs
index b0777b2..6f0c011 100644
--- a/src/sync/rwlock.rs
+++ b/src/sync/rwlock.rs
@@ -1,16 +1,29 @@
use crate::sync::batch_semaphore::{Semaphore, TryAcquireError};
use crate::sync::mutex::TryLockError;
use std::cell::UnsafeCell;
-use std::fmt;
use std::marker;
-use std::mem;
-use std::ops;
+use std::marker::PhantomData;
+use std::mem::ManuallyDrop;
+use std::sync::Arc;
+
+pub(crate) mod owned_read_guard;
+pub(crate) mod owned_write_guard;
+pub(crate) mod owned_write_guard_mapped;
+pub(crate) mod read_guard;
+pub(crate) mod write_guard;
+pub(crate) mod write_guard_mapped;
+pub(crate) use owned_read_guard::OwnedRwLockReadGuard;
+pub(crate) use owned_write_guard::OwnedRwLockWriteGuard;
+pub(crate) use owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
+pub(crate) use read_guard::RwLockReadGuard;
+pub(crate) use write_guard::RwLockWriteGuard;
+pub(crate) use write_guard_mapped::RwLockMappedWriteGuard;
#[cfg(not(loom))]
-const MAX_READS: usize = 32;
+const MAX_READS: u32 = std::u32::MAX >> 3;
#[cfg(loom)]
-const MAX_READS: usize = 10;
+const MAX_READS: u32 = 10;
/// An asynchronous reader-writer lock.
///
@@ -73,6 +86,9 @@ const MAX_READS: usize = 10;
/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies
#[derive(Debug)]
pub struct RwLock<T: ?Sized> {
+ // maximum number of concurrent readers
+ mr: u32,
+
//semaphore to coordinate read and write access to T
s: Semaphore,
@@ -80,240 +96,6 @@ pub struct RwLock<T: ?Sized> {
c: UnsafeCell<T>,
}
-/// RAII structure used to release the shared read access of a lock when
-/// dropped.
-///
-/// This structure is created by the [`read`] method on
-/// [`RwLock`].
-///
-/// [`read`]: method@RwLock::read
-/// [`RwLock`]: struct@RwLock
-pub struct RwLockReadGuard<'a, T: ?Sized> {
- s: &'a Semaphore,
- data: *const T,
- marker: marker::PhantomData<&'a T>,
-}
-
-impl<'a, T> RwLockReadGuard<'a, T> {
- /// Make a new `RwLockReadGuard` for a component of the locked data.
- ///
- /// This operation cannot fail as the `RwLockReadGuard` passed in already
- /// locked the data.
- ///
- /// This is an associated function that needs to be
- /// used as `RwLockReadGuard::map(...)`. A method would interfere with
- /// methods of the same name on the contents of the locked data.
- ///
- /// This is an asynchronous version of [`RwLockReadGuard::map`] from the
- /// [`parking_lot` crate].
- ///
- /// [`RwLockReadGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.map
- /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
- ///
- /// # Examples
- ///
- /// ```
- /// use tokio::sync::{RwLock, RwLockReadGuard};
- ///
- /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
- /// struct Foo(u32);
- ///
- /// # #[tokio::main]
- /// # async fn main() {
- /// let lock = RwLock::new(Foo(1));
- ///
- /// let guard = lock.read().await;
- /// let guard = RwLockReadGuard::map(guard, |f| &f.0);
- ///
- /// assert_eq!(1, *guard);
- /// # }
- /// ```
- #[inline]
- pub fn map<F, U: ?Sized>(this: Self, f: F) -> RwLockReadGuard<'a, U>
- where
- F: FnOnce(&T) -> &U,
- {
- let data = f(&*this) as *const U;
- let s = this.s;
- // NB: Forget to avoid drop impl from being called.
- mem::forget(this);
- RwLockReadGuard {
- s,
- data,
- marker: marker::PhantomData,
- }
- }
-
- /// Attempts to make a new [`RwLockReadGuard`] for a component of the
- /// locked data. The original guard is returned if the closure returns
- /// `None`.
- ///
- /// This operation cannot fail as the `RwLockReadGuard` passed in already
- /// locked the data.
- ///
- /// This is an associated function that needs to be used as
- /// `RwLockReadGuard::try_map(..)`. A method would interfere with methods of the
- /// same name on the contents of the locked data.
- ///
- /// This is an asynchronous version of [`RwLockReadGuard::try_map`] from the
- /// [`parking_lot` crate].
- ///
- /// [`RwLockReadGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.try_map
- /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
- ///
- /// # Examples
- ///
- /// ```
- /// use tokio::sync::{RwLock, RwLockReadGuard};
- ///
- /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
- /// struct Foo(u32);
- ///
- /// # #[tokio::main]
- /// # async fn main() {
- /// let lock = RwLock::new(Foo(1));
- ///
- /// let guard = lock.read().await;
- /// let guard = RwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail");
- ///
- /// assert_eq!(1, *guard);
- /// # }
- /// ```
- #[inline]
- pub fn try_map<F, U: ?Sized>(this: Self, f: F) -> Result<RwLockReadGuard<'a, U>, Self>
- where
- F: FnOnce(&T) -> Option<&U>,
- {
- let data = match f(&*this) {
- Some(data) => data as *const U,
- None => return Err(this),
- };
- let s = this.s;
- // NB: Forget to avoid drop impl from being called.
- mem::forget(this);
- Ok(RwLockReadGuard {
- s,
- data,
- marker: marker::PhantomData,
- })
- }
-}
-
-impl<'a, T: ?Sized> fmt::Debug for RwLockReadGuard<'a, T>
-where
- T: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&**self, f)
- }
-}
-
-impl<'a, T: ?Sized> fmt::Display for RwLockReadGuard<'a, T>
-where
- T: fmt::Display,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&**self, f)
- }
-}
-
-impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
- fn drop(&mut self) {
- self.s.release(1);
- }
-}
-
-/// RAII structure used to release the exclusive write access of a lock when
-/// dropped.
-///
-/// This structure is created by the [`write`] and method
-/// on [`RwLock`].
-///
-/// [`write`]: method@RwLock::write
-/// [`RwLock`]: struct@RwLock
-pub struct RwLockWriteGuard<'a, T: ?Sized> {
- s: &'a Semaphore,
- data: *mut T,
- marker: marker::PhantomData<&'a mut T>,
-}
-
-impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
- /// Atomically downgrades a write lock into a read lock without allowing
- /// any writers to take exclusive access of the lock in the meantime.
- ///
- /// **Note:** This won't *necessarily* allow any additional readers to acquire
- /// locks, since [`RwLock`] is fair and it is possible that a writer is next
- /// in line.
- ///
- /// Returns an RAII guard which will drop this read access of the `RwLock`
- /// when dropped.
- ///
- /// # Examples
- ///
- /// ```
- /// # use tokio::sync::RwLock;
- /// # use std::sync::Arc;
- /// #
- /// # #[tokio::main]
- /// # async fn main() {
- /// let lock = Arc::new(RwLock::new(1));
- ///
- /// let n = lock.write().await;
- ///
- /// let cloned_lock = lock.clone();
- /// let handle = tokio::spawn(async move {
- /// *cloned_lock.write().await = 2;
- /// });
- ///
- /// let n = n.downgrade();
- /// assert_eq!(*n, 1, "downgrade is atomic");
- ///
- /// drop(n);
- /// handle.await.unwrap();
- /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
- /// # }
- /// ```
- ///
- /// [`RwLock`]: struct@RwLock
- pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
- let RwLockWriteGuard { s, data, .. } = self;
-
- // Release all but one of the permits held by the write guard
- s.release(MAX_READS - 1);
- // NB: Forget to avoid drop impl from being called.
- mem::forget(self);
- RwLockReadGuard {
- s,
- data,
- marker: marker::PhantomData,
- }
- }
-}
-
-impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T>
-where
- T: fmt::Debug,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Debug::fmt(&**self, f)
- }
-}
-
-impl<'a, T: ?Sized> fmt::Display for RwLockWriteGuard<'a, T>
-where
- T: fmt::Display,
-{
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- fmt::Display::fmt(&**self, f)
- }
-}
-
-impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
- fn drop(&mut self) {
- self.s.release(MAX_READS);
- }
-}
-
#[test]
#[cfg(not(loom))]
fn bounds() {
@@ -331,13 +113,31 @@ fn bounds() {
check_sync::<RwLockReadGuard<'_, u32>>();
check_unpin::<RwLockReadGuard<'_, u32>>();
+ check_send::<OwnedRwLockReadGuard<u32, i32>>();
+ check_sync::<OwnedRwLockReadGuard<u32, i32>>();
+ check_unpin::<OwnedRwLockReadGuard<u32, i32>>();
+
check_send::<RwLockWriteGuard<'_, u32>>();
check_sync::<RwLockWriteGuard<'_, u32>>();
check_unpin::<RwLockWriteGuard<'_, u32>>();
- let rwlock = RwLock::new(0);
+ check_send::<RwLockMappedWriteGuard<'_, u32>>();
+ check_sync::<RwLockMappedWriteGuard<'_, u32>>();
+ check_unpin::<RwLockMappedWriteGuard<'_, u32>>();
+
+ check_send::<OwnedRwLockWriteGuard<u32>>();
+ check_sync::<OwnedRwLockWriteGuard<u32>>();
+ check_unpin::<OwnedRwLockWriteGuard<u32>>();
+
+ check_send::<OwnedRwLockMappedWriteGuard<u32, i32>>();
+ check_sync::<OwnedRwLockMappedWriteGuard<u32, i32>>();
+ check_unpin::<OwnedRwLockMappedWriteGuard<u32, i32>>();
+
+ let rwlock = Arc::new(RwLock::new(0));
check_send_sync_val(rwlock.read());
+ check_send_sync_val(Arc::clone(&rwlock).read_owned());
check_send_sync_val(rwlock.write());
+ check_send_sync_val(Arc::clone(&rwlock).write_owned());
}
// As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads.
@@ -350,12 +150,42 @@ unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {}
// `T` is `Send`.
unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {}
unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {}
+// T is required to be `Send` because an OwnedRwLockReadGuard can be used to drop the value held in
+// the RwLock, unlike RwLockReadGuard.
+unsafe impl<T, U> Send for OwnedRwLockReadGuard<T, U>
+where
+ T: ?Sized + Send + Sync,
+ U: ?Sized + Sync,
+{
+}
+unsafe impl<T, U> Sync for OwnedRwLockReadGuard<T, U>
+where
+ T: ?Sized + Send + Sync,
+ U: ?Sized + Send + Sync,
+{
+}
unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
+unsafe impl<T> Sync for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
+unsafe impl<T> Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
+unsafe impl<T, U> Sync for OwnedRwLockMappedWriteGuard<T, U>
+where
+ T: ?Sized + Send + Sync,
+ U: ?Sized + Send + Sync,
+{
+}
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send` - but since this is also provides mutable access, we need to
// make sure that `T` is `Send` since its value can be sent across thread
// boundaries.
unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
+unsafe impl<T> Send for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
+unsafe impl<T> Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
+unsafe impl<T, U> Send for OwnedRwLockMappedWriteGuard<T, U>
+where
+ T: ?Sized + Send + Sync,
+ U: ?Sized + Send + Sync,
+{
+}
impl<T: ?Sized> RwLock<T> {
/// Creates a new instance of an `RwLock<T>` which is unlocked.
@@ -372,8 +202,39 @@ impl<T: ?Sized> RwLock<T> {
T: Sized,
{
RwLock {
+ mr: MAX_READS,
+ c: UnsafeCell::new(value),
+ s: Semaphore::new(MAX_READS as usize),
+ }
+ }
+
+ /// Creates a new instance of an `RwLock<T>` which is unlocked
+ /// and allows a maximum of `max_reads` concurrent readers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// let lock = RwLock::with_max_readers(5, 1024);
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// Panics if `max_reads` is more than `u32::MAX >> 3`.
+ pub fn with_max_readers(value: T, max_reads: u32) -> RwLock<T>
+ where
+ T: Sized,
+ {
+ assert!(
+ max_reads <= MAX_READS,
+ "a RwLock may not be created with more than {} readers",
+ MAX_READS
+ );
+ RwLock {
+ mr: max_reads,
c: UnsafeCell::new(value),
- s: Semaphore::new(MAX_READS),
+ s: Semaphore::new(max_reads as usize),
}
}
@@ -393,8 +254,33 @@ impl<T: ?Sized> RwLock<T> {
T: Sized,
{
RwLock {
+ mr: MAX_READS,
+ c: UnsafeCell::new(value),
+ s: Semaphore::const_new(MAX_READS as usize),
+ }
+ }
+
+ /// Creates a new instance of an `RwLock<T>` which is unlocked
+ /// and allows a maximum of `max_reads` concurrent readers.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::RwLock;
+ ///
+ /// static LOCK: RwLock<i32> = RwLock::const_with_max_readers(5, 1024);
+ /// ```
+ #[cfg(all(feature = "parking_lot", not(all(loom, test))))]
+ #[cfg_attr(docsrs, doc(cfg(feature = "parking_lot")))]
+ pub const fn const_with_max_readers(value: T, mut max_reads: u32) -> RwLock<T>
+ where
+ T: Sized,
+ {
+ max_reads &= MAX_READS;
+ RwLock {
+ mr: max_reads,
c: UnsafeCell::new(value),
- s: Semaphore::const_new(MAX_READS),
+ s: Semaphore::const_new(max_reads as usize),
}
}
@@ -437,7 +323,6 @@ impl<T: ?Sized> RwLock<T> {
/// drop(n);
///}
/// ```
- ///
pub async fn read(&self) -> RwLockReadGuard<'_, T> {
self.s.acquire(1).await.unwrap_or_else(|_| {
// The semaphore was closed. but, we never explicitly close it, and we have a
@@ -451,6 +336,64 @@ impl<T: ?Sized> RwLock<T> {
}
}
+ /// Locks this `RwLock` with shared read access, causing the current task
+ /// to yield until the lock has been acquired.
+ ///
+ /// The calling task will yield until there are no writers which hold the
+ /// lock. There may be other readers inside the lock when the task resumes.
+ ///
+ /// This method is identical to [`RwLock::read`], except that the returned
+ /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
+ /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
+ /// method, and the guard will live for the `'static` lifetime, as it keeps
+ /// the `RwLock` alive by holding an `Arc`.
+ ///
+ /// Note that under the priority policy of [`RwLock`], read locks are not
+ /// granted until prior write locks have been released, to prevent starvation. Therefore
+ /// deadlock may occur if a read lock is held by the current task, a write
+ /// lock attempt is made, and then a subsequent read lock attempt is made
+ /// by the current task.
+ ///
+ /// Returns an RAII guard which will drop this read access of the `RwLock`
+ /// when dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let lock = Arc::new(RwLock::new(1));
+ /// let c_lock = lock.clone();
+ ///
+ /// let n = lock.read_owned().await;
+ /// assert_eq!(*n, 1);
+ ///
+ /// tokio::spawn(async move {
+ /// // While main has an active read lock, we acquire one too.
+ /// let r = c_lock.read_owned().await;
+ /// assert_eq!(*r, 1);
+ /// }).await.expect("The spawned task has panicked");
+ ///
+ /// // Drop the guard after the spawned task finishes.
+ /// drop(n);
+ ///}
+ /// ```
+ pub async fn read_owned(self: Arc<Self>) -> OwnedRwLockReadGuard<T> {
+ self.s.acquire(1).await.unwrap_or_else(|_| {
+ // The semaphore was closed. but, we never explicitly close it, and we have a
+ // handle to it through the Arc, which means that this can never happen.
+ unreachable!()
+ });
+ OwnedRwLockReadGuard {
+ data: self.c.get(),
+ lock: ManuallyDrop::new(self),
+ _p: PhantomData,
+ }
+ }
+
/// Attempts to acquire this `RwLock` with shared read access.
///
/// If the access couldn't be acquired immediately, returns [`TryLockError`].
@@ -497,11 +440,63 @@ impl<T: ?Sized> RwLock<T> {
})
}
+ /// Attempts to acquire this `RwLock` with shared read access.
+ ///
+ /// If the access couldn't be acquired immediately, returns [`TryLockError`].
+ /// Otherwise, an RAII guard is returned which will release read access
+ /// when dropped.
+ ///
+ /// This method is identical to [`RwLock::try_read`], except that the
+ /// returned guard references the `RwLock` with an [`Arc`] rather than by
+ /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
+ /// call this method, and the guard will live for the `'static` lifetime,
+ /// as it keeps the `RwLock` alive by holding an `Arc`.
+ ///
+ /// [`TryLockError`]: TryLockError
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let lock = Arc::new(RwLock::new(1));
+ /// let c_lock = lock.clone();
+ ///
+ /// let v = lock.try_read_owned().unwrap();
+ /// assert_eq!(*v, 1);
+ ///
+ /// tokio::spawn(async move {
+ /// // While main has an active read lock, we acquire one too.
+ /// let n = c_lock.read_owned().await;
+ /// assert_eq!(*n, 1);
+ /// }).await.expect("The spawned task has panicked");
+ ///
+ /// // Drop the guard when spawned task finishes.
+ /// drop(v);
+ /// }
+ /// ```
+ pub fn try_read_owned(self: Arc<Self>) -> Result<OwnedRwLockReadGuard<T>, TryLockError> {
+ match self.s.try_acquire(1) {
+ Ok(permit) => permit,
+ Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
+ Err(TryAcquireError::Closed) => unreachable!(),
+ }
+
+ Ok(OwnedRwLockReadGuard {
+ data: self.c.get(),
+ lock: ManuallyDrop::new(self),
+ _p: PhantomData,
+ })
+ }
+
/// Locks this `RwLock` with exclusive write access, causing the current
/// task to yield until the lock has been acquired.
///
- /// The calling task will yield while other writers or readers
- /// currently have access to the lock.
+ /// The calling task will yield while other writers or readers currently
+ /// have access to the lock.
///
/// Returns an RAII guard which will drop the write access of this `RwLock`
/// when dropped.
@@ -520,18 +515,62 @@ impl<T: ?Sized> RwLock<T> {
///}
/// ```
pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
- self.s.acquire(MAX_READS as u32).await.unwrap_or_else(|_| {
+ self.s.acquire(self.mr).await.unwrap_or_else(|_| {
// The semaphore was closed. but, we never explicitly close it, and we have a
// handle to it through the Arc, which means that this can never happen.
unreachable!()
});
RwLockWriteGuard {
+ permits_acquired: self.mr,
s: &self.s,
data: self.c.get(),
marker: marker::PhantomData,
}
}
+ /// Locks this `RwLock` with exclusive write access, causing the current
+ /// task to yield until the lock has been acquired.
+ ///
+ /// The calling task will yield while other writers or readers currently
+ /// have access to the lock.
+ ///
+ /// This method is identical to [`RwLock::write`], except that the returned
+ /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
+ /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
+ /// method, and the guard will live for the `'static` lifetime, as it keeps
+ /// the `RwLock` alive by holding an `Arc`.
+ ///
+ /// Returns an RAII guard which will drop the write access of this `RwLock`
+ /// when dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let lock = Arc::new(RwLock::new(1));
+ ///
+ /// let mut n = lock.write_owned().await;
+ /// *n = 2;
+ ///}
+ /// ```
+ pub async fn write_owned(self: Arc<Self>) -> OwnedRwLockWriteGuard<T> {
+ self.s.acquire(self.mr).await.unwrap_or_else(|_| {
+ // The semaphore was closed. but, we never explicitly close it, and we have a
+ // handle to it through the Arc, which means that this can never happen.
+ unreachable!()
+ });
+ OwnedRwLockWriteGuard {
+ permits_acquired: self.mr,
+ data: self.c.get(),
+ lock: ManuallyDrop::new(self),
+ _p: PhantomData,
+ }
+ }
+
/// Attempts to acquire this `RwLock` with exclusive write access.
///
/// If the access couldn't be acquired immediately, returns [`TryLockError`].
@@ -556,19 +595,65 @@ impl<T: ?Sized> RwLock<T> {
/// }
/// ```
pub fn try_write(&self) -> Result<RwLockWriteGuard<'_, T>, TryLockError> {
- match self.s.try_acquire(MAX_READS as u32) {
+ match self.s.try_acquire(self.mr) {
Ok(permit) => permit,
Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
Err(TryAcquireError::Closed) => unreachable!(),
}
Ok(RwLockWriteGuard {
+ permits_acquired: self.mr,
s: &self.s,
data: self.c.get(),
marker: marker::PhantomData,
})
}
+ /// Attempts to acquire this `RwLock` with exclusive write access.
+ ///
+ /// If the access couldn't be acquired immediately, returns [`TryLockError`].
+ /// Otherwise, an RAII guard is returned which will release write access
+ /// when dropped.
+ ///
+ /// This method is identical to [`RwLock::try_write`], except that the
+ /// returned guard references the `RwLock` with an [`Arc`] rather than by
+ /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
+ /// call this method, and the guard will live for the `'static` lifetime,
+ /// as it keeps the `RwLock` alive by holding an `Arc`.
+ ///
+ /// [`TryLockError`]: TryLockError
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::RwLock;
+ ///
+ /// #[tokio::main]
+ /// async fn main() {
+ /// let rw = Arc::new(RwLock::new(1));
+ ///
+ /// let v = Arc::clone(&rw).read_owned().await;
+ /// assert_eq!(*v, 1);
+ ///
+ /// assert!(rw.try_write_owned().is_err());
+ /// }
+ /// ```
+ pub fn try_write_owned(self: Arc<Self>) -> Result<OwnedRwLockWriteGuard<T>, TryLockError> {
+ match self.s.try_acquire(self.mr) {
+ Ok(permit) => permit,
+ Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
+ Err(TryAcquireError::Closed) => unreachable!(),
+ }
+
+ Ok(OwnedRwLockWriteGuard {
+ permits_acquired: self.mr,
+ data: self.c.get(),
+ lock: ManuallyDrop::new(self),
+ _p: PhantomData,
+ })
+ }
+
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the `RwLock` mutably, no actual locking needs to
@@ -602,28 +687,6 @@ impl<T: ?Sized> RwLock<T> {
}
}
-impl<T: ?Sized> ops::Deref for RwLockReadGuard<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- unsafe { &*self.data }
- }
-}
-
-impl<T: ?Sized> ops::Deref for RwLockWriteGuard<'_, T> {
- type Target = T;
-
- fn deref(&self) -> &T {
- unsafe { &*self.data }
- }
-}
-
-impl<T: ?Sized> ops::DerefMut for RwLockWriteGuard<'_, T> {
- fn deref_mut(&mut self) -> &mut T {
- unsafe { &mut *self.data }
- }
-}
-
impl<T> From<T> for RwLock<T> {
fn from(s: T) -> Self {
Self::new(s)
diff --git a/src/sync/rwlock/owned_read_guard.rs b/src/sync/rwlock/owned_read_guard.rs
new file mode 100644
index 0000000..b7f3926
--- /dev/null
+++ b/src/sync/rwlock/owned_read_guard.rs
@@ -0,0 +1,149 @@
+use crate::sync::rwlock::RwLock;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::mem::ManuallyDrop;
+use std::ops;
+use std::sync::Arc;
+
+/// Owned RAII structure used to release the shared read access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`read_owned`] method on
+/// [`RwLock`].
+///
+/// [`read_owned`]: method@crate::sync::RwLock::read_owned
+/// [`RwLock`]: struct@crate::sync::RwLock
+pub struct OwnedRwLockReadGuard<T: ?Sized, U: ?Sized = T> {
+ // ManuallyDrop allows us to destructure into this field without running the destructor.
+ pub(super) lock: ManuallyDrop<Arc<RwLock<T>>>,
+ pub(super) data: *const U,
+ pub(super) _p: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> OwnedRwLockReadGuard<T, U> {
+ /// Make a new `OwnedRwLockReadGuard` for a component of the locked data.
+ /// This operation cannot fail as the `OwnedRwLockReadGuard` passed in
+ /// already locked the data.
+ ///
+ /// This is an associated function that needs to be
+ /// used as `OwnedRwLockReadGuard::map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the locked data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::{RwLock, OwnedRwLockReadGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(Foo(1)));
+ ///
+ /// let guard = lock.read_owned().await;
+ /// let guard = OwnedRwLockReadGuard::map(guard, |f| &f.0);
+ ///
+ /// assert_eq!(1, *guard);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn map<F, V: ?Sized>(mut this: Self, f: F) -> OwnedRwLockReadGuard<T, V>
+ where
+ F: FnOnce(&U) -> &V,
+ {
+ let data = f(&*this) as *const V;
+ let lock = unsafe { ManuallyDrop::take(&mut this.lock) };
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ OwnedRwLockReadGuard {
+ lock: ManuallyDrop::new(lock),
+ data,
+ _p: PhantomData,
+ }
+ }
+
+ /// Attempts to make a new [`OwnedRwLockReadGuard`] for a component of the
+ /// locked data. The original guard is returned if the closure returns
+ /// `None`.
+ ///
+ /// This operation cannot fail as the `OwnedRwLockReadGuard` passed in
+ /// already locked the data.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `OwnedRwLockReadGuard::try_map(..)`. A method would interfere with
+ /// methods of the same name on the contents of the locked data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::{RwLock, OwnedRwLockReadGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(Foo(1)));
+ ///
+ /// let guard = lock.read_owned().await;
+ /// let guard = OwnedRwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail");
+ ///
+ /// assert_eq!(1, *guard);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn try_map<F, V: ?Sized>(mut this: Self, f: F) -> Result<OwnedRwLockReadGuard<T, V>, Self>
+ where
+ F: FnOnce(&U) -> Option<&V>,
+ {
+ let data = match f(&*this) {
+ Some(data) => data as *const V,
+ None => return Err(this),
+ };
+ let lock = unsafe { ManuallyDrop::take(&mut this.lock) };
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ Ok(OwnedRwLockReadGuard {
+ lock: ManuallyDrop::new(lock),
+ data,
+ _p: PhantomData,
+ })
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> ops::Deref for OwnedRwLockReadGuard<T, U> {
+ type Target = U;
+
+ fn deref(&self) -> &U {
+ unsafe { &*self.data }
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> fmt::Debug for OwnedRwLockReadGuard<T, U>
+where
+ U: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> fmt::Display for OwnedRwLockReadGuard<T, U>
+where
+ U: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> Drop for OwnedRwLockReadGuard<T, U> {
+ fn drop(&mut self) {
+ self.lock.s.release(1);
+ unsafe { ManuallyDrop::drop(&mut self.lock) };
+ }
+}
diff --git a/src/sync/rwlock/owned_write_guard.rs b/src/sync/rwlock/owned_write_guard.rs
new file mode 100644
index 0000000..91b6595
--- /dev/null
+++ b/src/sync/rwlock/owned_write_guard.rs
@@ -0,0 +1,234 @@
+use crate::sync::rwlock::owned_read_guard::OwnedRwLockReadGuard;
+use crate::sync::rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
+use crate::sync::rwlock::RwLock;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem::{self, ManuallyDrop};
+use std::ops;
+use std::sync::Arc;
+
+/// Owned RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write_owned`] method
+/// on [`RwLock`].
+///
+/// [`write_owned`]: method@crate::sync::RwLock::write_owned
+/// [`RwLock`]: struct@crate::sync::RwLock
+pub struct OwnedRwLockWriteGuard<T: ?Sized> {
+ pub(super) permits_acquired: u32,
+ // ManuallyDrop allows us to destructure into this field without running the destructor.
+ pub(super) lock: ManuallyDrop<Arc<RwLock<T>>>,
+ pub(super) data: *mut T,
+ pub(super) _p: PhantomData<T>,
+}
+
+impl<T: ?Sized> OwnedRwLockWriteGuard<T> {
+ /// Make a new [`OwnedRwLockMappedWriteGuard`] for a component of the locked
+ /// data.
+ ///
+ /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in
+ /// already locked the data.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `OwnedRwLockWriteGuard::map(..)`. A method would interfere with methods
+ /// of the same name on the contents of the locked data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(Foo(1)));
+ ///
+ /// {
+ /// let lock = Arc::clone(&lock);
+ /// let mut mapped = OwnedRwLockWriteGuard::map(lock.write_owned().await, |f| &mut f.0);
+ /// *mapped = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> OwnedRwLockMappedWriteGuard<T, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ let data = f(&mut *this) as *mut U;
+ let lock = unsafe { ManuallyDrop::take(&mut this.lock) };
+ let permits_acquired = this.permits_acquired;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ OwnedRwLockMappedWriteGuard {
+ permits_acquired,
+ lock: ManuallyDrop::new(lock),
+ data,
+ _p: PhantomData,
+ }
+ }
+
+ /// Attempts to make a new [`OwnedRwLockMappedWriteGuard`] for a component
+ /// of the locked data. The original guard is returned if the closure
+ /// returns `None`.
+ ///
+ /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in
+ /// already locked the data.
+ ///
+ /// This is an associated function that needs to be
+ /// used as `OwnedRwLockWriteGuard::try_map(...)`. A method would interfere
+ /// with methods of the same name on the contents of the locked data.
+ ///
+ /// [`OwnedRwLockMappedWriteGuard`]: struct@crate::sync::OwnedRwLockMappedWriteGuard
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(Foo(1)));
+ ///
+ /// {
+ /// let guard = Arc::clone(&lock).write_owned().await;
+ /// let mut guard = OwnedRwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
+ /// *guard = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn try_map<F, U: ?Sized>(
+ mut this: Self,
+ f: F,
+ ) -> Result<OwnedRwLockMappedWriteGuard<T, U>, Self>
+ where
+ F: FnOnce(&mut T) -> Option<&mut U>,
+ {
+ let data = match f(&mut *this) {
+ Some(data) => data as *mut U,
+ None => return Err(this),
+ };
+ let permits_acquired = this.permits_acquired;
+ let lock = unsafe { ManuallyDrop::take(&mut this.lock) };
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ Ok(OwnedRwLockMappedWriteGuard {
+ permits_acquired,
+ lock: ManuallyDrop::new(lock),
+ data,
+ _p: PhantomData,
+ })
+ }
+
+ /// Converts this `OwnedRwLockWriteGuard` into an
+ /// `OwnedRwLockMappedWriteGuard`. This method can be used to store a
+ /// non-mapped guard in a struct field that expects a mapped guard.
+ ///
+ /// This is equivalent to calling `OwnedRwLockWriteGuard::map(guard, |me| me)`.
+ #[inline]
+ pub fn into_mapped(this: Self) -> OwnedRwLockMappedWriteGuard<T> {
+ Self::map(this, |me| me)
+ }
+
+ /// Atomically downgrades a write lock into a read lock without allowing
+ /// any writers to take exclusive access of the lock in the meantime.
+ ///
+ /// **Note:** This won't *necessarily* allow any additional readers to acquire
+ /// locks, since [`RwLock`] is fair and it is possible that a writer is next
+ /// in line.
+ ///
+ /// Returns an RAII guard which will drop this read access of the `RwLock`
+ /// when dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::sync::RwLock;
+ /// # use std::sync::Arc;
+ /// #
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(1));
+ ///
+ /// let n = lock.clone().write_owned().await;
+ ///
+ /// let cloned_lock = lock.clone();
+ /// let handle = tokio::spawn(async move {
+ /// *cloned_lock.write_owned().await = 2;
+ /// });
+ ///
+ /// let n = n.downgrade();
+ /// assert_eq!(*n, 1, "downgrade is atomic");
+ ///
+ /// drop(n);
+ /// handle.await.unwrap();
+ /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
+ /// # }
+ /// ```
+ pub fn downgrade(mut self) -> OwnedRwLockReadGuard<T> {
+ let lock = unsafe { ManuallyDrop::take(&mut self.lock) };
+ let data = self.data;
+
+ // Release all but one of the permits held by the write guard
+ lock.s.release((self.permits_acquired - 1) as usize);
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(self);
+ OwnedRwLockReadGuard {
+ lock: ManuallyDrop::new(lock),
+ data,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized> ops::Deref for OwnedRwLockWriteGuard<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.data }
+ }
+}
+
+impl<T: ?Sized> ops::DerefMut for OwnedRwLockWriteGuard<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.data }
+ }
+}
+
+impl<T: ?Sized> fmt::Debug for OwnedRwLockWriteGuard<T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized> fmt::Display for OwnedRwLockWriteGuard<T>
+where
+ T: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized> Drop for OwnedRwLockWriteGuard<T> {
+ fn drop(&mut self) {
+ self.lock.s.release(self.permits_acquired as usize);
+ unsafe { ManuallyDrop::drop(&mut self.lock) };
+ }
+}
diff --git a/src/sync/rwlock/owned_write_guard_mapped.rs b/src/sync/rwlock/owned_write_guard_mapped.rs
new file mode 100644
index 0000000..6453236
--- /dev/null
+++ b/src/sync/rwlock/owned_write_guard_mapped.rs
@@ -0,0 +1,171 @@
+use crate::sync::rwlock::RwLock;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem::{self, ManuallyDrop};
+use std::ops;
+use std::sync::Arc;
+
+/// Owned RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by [mapping] an [`OwnedRwLockWriteGuard`]. It is a
+/// separate type from `OwnedRwLockWriteGuard` to disallow downgrading a mapped
+/// guard, since doing so can cause undefined behavior.
+///
+/// [mapping]: method@crate::sync::OwnedRwLockWriteGuard::map
+/// [`OwnedRwLockWriteGuard`]: struct@crate::sync::OwnedRwLockWriteGuard
+pub struct OwnedRwLockMappedWriteGuard<T: ?Sized, U: ?Sized = T> {
+ pub(super) permits_acquired: u32,
+ // ManuallyDrop allows us to destructure into this field without running the destructor.
+ pub(super) lock: ManuallyDrop<Arc<RwLock<T>>>,
+ pub(super) data: *mut U,
+ pub(super) _p: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> OwnedRwLockMappedWriteGuard<T, U> {
+ /// Make a new `OwnedRwLockMappedWriteGuard` for a component of the locked
+ /// data.
+ ///
+ /// This operation cannot fail as the `OwnedRwLockMappedWriteGuard` passed
+ /// in already locked the data.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `OwnedRwLockMappedWriteGuard::map(..)`. A method would interfere with methods
+ /// of the same name on the contents of the locked data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(Foo(1)));
+ ///
+ /// {
+ /// let lock = Arc::clone(&lock);
+ /// let mut mapped = OwnedRwLockWriteGuard::map(lock.write_owned().await, |f| &mut f.0);
+ /// *mapped = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn map<F, V: ?Sized>(mut this: Self, f: F) -> OwnedRwLockMappedWriteGuard<T, V>
+ where
+ F: FnOnce(&mut U) -> &mut V,
+ {
+ let data = f(&mut *this) as *mut V;
+ let lock = unsafe { ManuallyDrop::take(&mut this.lock) };
+ let permits_acquired = this.permits_acquired;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ OwnedRwLockMappedWriteGuard {
+ permits_acquired,
+ lock: ManuallyDrop::new(lock),
+ data,
+ _p: PhantomData,
+ }
+ }
+
+ /// Attempts to make a new `OwnedRwLockMappedWriteGuard` for a component
+ /// of the locked data. The original guard is returned if the closure
+ /// returns `None`.
+ ///
+ /// This operation cannot fail as the `OwnedRwLockMappedWriteGuard` passed
+ /// in already locked the data.
+ ///
+ /// This is an associated function that needs to be
+ /// used as `OwnedRwLockMappedWriteGuard::try_map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the locked data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(Foo(1)));
+ ///
+ /// {
+ /// let guard = Arc::clone(&lock).write_owned().await;
+ /// let mut guard = OwnedRwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
+ /// *guard = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn try_map<F, V: ?Sized>(
+ mut this: Self,
+ f: F,
+ ) -> Result<OwnedRwLockMappedWriteGuard<T, V>, Self>
+ where
+ F: FnOnce(&mut U) -> Option<&mut V>,
+ {
+ let data = match f(&mut *this) {
+ Some(data) => data as *mut V,
+ None => return Err(this),
+ };
+ let lock = unsafe { ManuallyDrop::take(&mut this.lock) };
+ let permits_acquired = this.permits_acquired;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ Ok(OwnedRwLockMappedWriteGuard {
+ permits_acquired,
+ lock: ManuallyDrop::new(lock),
+ data,
+ _p: PhantomData,
+ })
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> ops::Deref for OwnedRwLockMappedWriteGuard<T, U> {
+ type Target = U;
+
+ fn deref(&self) -> &U {
+ unsafe { &*self.data }
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> ops::DerefMut for OwnedRwLockMappedWriteGuard<T, U> {
+ fn deref_mut(&mut self) -> &mut U {
+ unsafe { &mut *self.data }
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> fmt::Debug for OwnedRwLockMappedWriteGuard<T, U>
+where
+ U: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> fmt::Display for OwnedRwLockMappedWriteGuard<T, U>
+where
+ U: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized, U: ?Sized> Drop for OwnedRwLockMappedWriteGuard<T, U> {
+ fn drop(&mut self) {
+ self.lock.s.release(self.permits_acquired as usize);
+ unsafe { ManuallyDrop::drop(&mut self.lock) };
+ }
+}
diff --git a/src/sync/rwlock/read_guard.rs b/src/sync/rwlock/read_guard.rs
new file mode 100644
index 0000000..38eec77
--- /dev/null
+++ b/src/sync/rwlock/read_guard.rs
@@ -0,0 +1,156 @@
+use crate::sync::batch_semaphore::Semaphore;
+use std::fmt;
+use std::marker;
+use std::mem;
+use std::ops;
+
+/// RAII structure used to release the shared read access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`read`] method on
+/// [`RwLock`].
+///
+/// [`read`]: method@crate::sync::RwLock::read
+/// [`RwLock`]: struct@crate::sync::RwLock
+pub struct RwLockReadGuard<'a, T: ?Sized> {
+ pub(super) s: &'a Semaphore,
+ pub(super) data: *const T,
+ pub(super) marker: marker::PhantomData<&'a T>,
+}
+
+impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
+ /// Make a new `RwLockReadGuard` for a component of the locked data.
+ ///
+ /// This operation cannot fail as the `RwLockReadGuard` passed in already
+ /// locked the data.
+ ///
+ /// This is an associated function that needs to be
+ /// used as `RwLockReadGuard::map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the locked data.
+ ///
+ /// This is an asynchronous version of [`RwLockReadGuard::map`] from the
+ /// [`parking_lot` crate].
+ ///
+ /// [`RwLockReadGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.map
+ /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::{RwLock, RwLockReadGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = RwLock::new(Foo(1));
+ ///
+ /// let guard = lock.read().await;
+ /// let guard = RwLockReadGuard::map(guard, |f| &f.0);
+ ///
+ /// assert_eq!(1, *guard);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn map<F, U: ?Sized>(this: Self, f: F) -> RwLockReadGuard<'a, U>
+ where
+ F: FnOnce(&T) -> &U,
+ {
+ let data = f(&*this) as *const U;
+ let s = this.s;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ RwLockReadGuard {
+ s,
+ data,
+ marker: marker::PhantomData,
+ }
+ }
+
+ /// Attempts to make a new [`RwLockReadGuard`] for a component of the
+ /// locked data. The original guard is returned if the closure returns
+ /// `None`.
+ ///
+ /// This operation cannot fail as the `RwLockReadGuard` passed in already
+ /// locked the data.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RwLockReadGuard::try_map(..)`. A method would interfere with methods of the
+ /// same name on the contents of the locked data.
+ ///
+ /// This is an asynchronous version of [`RwLockReadGuard::try_map`] from the
+ /// [`parking_lot` crate].
+ ///
+ /// [`RwLockReadGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.try_map
+ /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::{RwLock, RwLockReadGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = RwLock::new(Foo(1));
+ ///
+ /// let guard = lock.read().await;
+ /// let guard = RwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail");
+ ///
+ /// assert_eq!(1, *guard);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn try_map<F, U: ?Sized>(this: Self, f: F) -> Result<RwLockReadGuard<'a, U>, Self>
+ where
+ F: FnOnce(&T) -> Option<&U>,
+ {
+ let data = match f(&*this) {
+ Some(data) => data as *const U,
+ None => return Err(this),
+ };
+ let s = this.s;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ Ok(RwLockReadGuard {
+ s,
+ data,
+ marker: marker::PhantomData,
+ })
+ }
+}
+
+impl<T: ?Sized> ops::Deref for RwLockReadGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.data }
+ }
+}
+
+impl<'a, T: ?Sized> fmt::Debug for RwLockReadGuard<'a, T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> fmt::Display for RwLockReadGuard<'a, T>
+where
+ T: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
+ fn drop(&mut self) {
+ self.s.release(1);
+ }
+}
diff --git a/src/sync/rwlock/write_guard.rs b/src/sync/rwlock/write_guard.rs
new file mode 100644
index 0000000..865a121
--- /dev/null
+++ b/src/sync/rwlock/write_guard.rs
@@ -0,0 +1,240 @@
+use crate::sync::batch_semaphore::Semaphore;
+use crate::sync::rwlock::read_guard::RwLockReadGuard;
+use crate::sync::rwlock::write_guard_mapped::RwLockMappedWriteGuard;
+use std::fmt;
+use std::marker;
+use std::mem;
+use std::ops;
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by the [`write`] method
+/// on [`RwLock`].
+///
+/// [`write`]: method@crate::sync::RwLock::write
+/// [`RwLock`]: struct@crate::sync::RwLock
+pub struct RwLockWriteGuard<'a, T: ?Sized> {
+ pub(super) permits_acquired: u32,
+ pub(super) s: &'a Semaphore,
+ pub(super) data: *mut T,
+ pub(super) marker: marker::PhantomData<&'a mut T>,
+}
+
+impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
+ /// Make a new [`RwLockMappedWriteGuard`] for a component of the locked data.
+ ///
+ /// This operation cannot fail as the `RwLockWriteGuard` passed in already
+ /// locked the data.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RwLockWriteGuard::map(..)`. A method would interfere with methods of
+ /// the same name on the contents of the locked data.
+ ///
+ /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the
+ /// [`parking_lot` crate].
+ ///
+ /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
+ /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map
+ /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::{RwLock, RwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = RwLock::new(Foo(1));
+ ///
+ /// {
+ /// let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0);
+ /// *mapped = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> RwLockMappedWriteGuard<'a, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ let data = f(&mut *this) as *mut U;
+ let s = this.s;
+ let permits_acquired = this.permits_acquired;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ RwLockMappedWriteGuard {
+ permits_acquired,
+ s,
+ data,
+ marker: marker::PhantomData,
+ }
+ }
+
+ /// Attempts to make a new [`RwLockMappedWriteGuard`] for a component of
+ /// the locked data. The original guard is returned if the closure returns
+ /// `None`.
+ ///
+ /// This operation cannot fail as the `RwLockWriteGuard` passed in already
+ /// locked the data.
+ ///
+ /// This is an associated function that needs to be
+ /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with
+ /// methods of the same name on the contents of the locked data.
+ ///
+ /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from
+ /// the [`parking_lot` crate].
+ ///
+ /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
+ /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map
+ /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::{RwLock, RwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = RwLock::new(Foo(1));
+ ///
+ /// {
+ /// let guard = lock.write().await;
+ /// let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
+ /// *guard = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn try_map<F, U: ?Sized>(
+ mut this: Self,
+ f: F,
+ ) -> Result<RwLockMappedWriteGuard<'a, U>, Self>
+ where
+ F: FnOnce(&mut T) -> Option<&mut U>,
+ {
+ let data = match f(&mut *this) {
+ Some(data) => data as *mut U,
+ None => return Err(this),
+ };
+ let s = this.s;
+ let permits_acquired = this.permits_acquired;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ Ok(RwLockMappedWriteGuard {
+ permits_acquired,
+ s,
+ data,
+ marker: marker::PhantomData,
+ })
+ }
+
+ /// Converts this `RwLockWriteGuard` into an `RwLockMappedWriteGuard`. This
+ /// method can be used to store a non-mapped guard in a struct field that
+ /// expects a mapped guard.
+ ///
+ /// This is equivalent to calling `RwLockWriteGuard::map(guard, |me| me)`.
+ #[inline]
+ pub fn into_mapped(this: Self) -> RwLockMappedWriteGuard<'a, T> {
+ RwLockWriteGuard::map(this, |me| me)
+ }
+
+ /// Atomically downgrades a write lock into a read lock without allowing
+ /// any writers to take exclusive access of the lock in the meantime.
+ ///
+ /// **Note:** This won't *necessarily* allow any additional readers to acquire
+ /// locks, since [`RwLock`] is fair and it is possible that a writer is next
+ /// in line.
+ ///
+ /// Returns an RAII guard which will drop this read access of the `RwLock`
+ /// when dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use tokio::sync::RwLock;
+ /// # use std::sync::Arc;
+ /// #
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = Arc::new(RwLock::new(1));
+ ///
+ /// let n = lock.write().await;
+ ///
+ /// let cloned_lock = lock.clone();
+ /// let handle = tokio::spawn(async move {
+ /// *cloned_lock.write().await = 2;
+ /// });
+ ///
+ /// let n = n.downgrade();
+ /// assert_eq!(*n, 1, "downgrade is atomic");
+ ///
+ /// drop(n);
+ /// handle.await.unwrap();
+ /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
+ /// # }
+ /// ```
+ ///
+ /// [`RwLock`]: struct@crate::sync::RwLock
+ pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
+ let RwLockWriteGuard { s, data, .. } = self;
+
+ // Release all but one of the permits held by the write guard
+ s.release((self.permits_acquired - 1) as usize);
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(self);
+ RwLockReadGuard {
+ s,
+ data,
+ marker: marker::PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized> ops::Deref for RwLockWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.data }
+ }
+}
+
+impl<T: ?Sized> ops::DerefMut for RwLockWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.data }
+ }
+}
+
+impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> fmt::Display for RwLockWriteGuard<'a, T>
+where
+ T: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
+ fn drop(&mut self) {
+ self.s.release(self.permits_acquired as usize);
+ }
+}
diff --git a/src/sync/rwlock/write_guard_mapped.rs b/src/sync/rwlock/write_guard_mapped.rs
new file mode 100644
index 0000000..9c5b1e7
--- /dev/null
+++ b/src/sync/rwlock/write_guard_mapped.rs
@@ -0,0 +1,176 @@
+use crate::sync::batch_semaphore::Semaphore;
+use std::fmt;
+use std::marker;
+use std::mem;
+use std::ops;
+
+/// RAII structure used to release the exclusive write access of a lock when
+/// dropped.
+///
+/// This structure is created by [mapping] an [`RwLockWriteGuard`]. It is a
+/// separate type from `RwLockWriteGuard` to disallow downgrading a mapped
+/// guard, since doing so can cause undefined behavior.
+///
+/// [mapping]: method@crate::sync::RwLockWriteGuard::map
+/// [`RwLockWriteGuard`]: struct@crate::sync::RwLockWriteGuard
+pub struct RwLockMappedWriteGuard<'a, T: ?Sized> {
+ pub(super) permits_acquired: u32,
+ pub(super) s: &'a Semaphore,
+ pub(super) data: *mut T,
+ pub(super) marker: marker::PhantomData<&'a mut T>,
+}
+
+impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> {
+ /// Make a new `RwLockMappedWriteGuard` for a component of the locked data.
+ ///
+ /// This operation cannot fail as the `RwLockMappedWriteGuard` passed in already
+ /// locked the data.
+ ///
+ /// This is an associated function that needs to be used as
+ /// `RwLockMappedWriteGuard::map(..)`. A method would interfere with methods
+ /// of the same name on the contents of the locked data.
+ ///
+ /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the
+ /// [`parking_lot` crate].
+ ///
+ /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map
+ /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::{RwLock, RwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = RwLock::new(Foo(1));
+ ///
+ /// {
+ /// let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0);
+ /// *mapped = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> RwLockMappedWriteGuard<'a, U>
+ where
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ let data = f(&mut *this) as *mut U;
+ let s = this.s;
+ let permits_acquired = this.permits_acquired;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ RwLockMappedWriteGuard {
+ permits_acquired,
+ s,
+ data,
+ marker: marker::PhantomData,
+ }
+ }
+
+ /// Attempts to make a new [`RwLockMappedWriteGuard`] for a component of
+ /// the locked data. The original guard is returned if the closure returns
+ /// `None`.
+ ///
+ /// This operation cannot fail as the `RwLockMappedWriteGuard` passed in already
+ /// locked the data.
+ ///
+ /// This is an associated function that needs to be
+ /// used as `RwLockMappedWriteGuard::try_map(...)`. A method would interfere
+ /// with methods of the same name on the contents of the locked data.
+ ///
+ /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from
+ /// the [`parking_lot` crate].
+ ///
+ /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map
+ /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use tokio::sync::{RwLock, RwLockWriteGuard};
+ ///
+ /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
+ /// struct Foo(u32);
+ ///
+ /// # #[tokio::main]
+ /// # async fn main() {
+ /// let lock = RwLock::new(Foo(1));
+ ///
+ /// {
+ /// let guard = lock.write().await;
+ /// let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
+ /// *guard = 2;
+ /// }
+ ///
+ /// assert_eq!(Foo(2), *lock.read().await);
+ /// # }
+ /// ```
+ #[inline]
+ pub fn try_map<F, U: ?Sized>(
+ mut this: Self,
+ f: F,
+ ) -> Result<RwLockMappedWriteGuard<'a, U>, Self>
+ where
+ F: FnOnce(&mut T) -> Option<&mut U>,
+ {
+ let data = match f(&mut *this) {
+ Some(data) => data as *mut U,
+ None => return Err(this),
+ };
+ let s = this.s;
+ let permits_acquired = this.permits_acquired;
+ // NB: Forget to avoid drop impl from being called.
+ mem::forget(this);
+ Ok(RwLockMappedWriteGuard {
+ permits_acquired,
+ s,
+ data,
+ marker: marker::PhantomData,
+ })
+ }
+}
+
+impl<T: ?Sized> ops::Deref for RwLockMappedWriteGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.data }
+ }
+}
+
+impl<T: ?Sized> ops::DerefMut for RwLockMappedWriteGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.data }
+ }
+}
+
+impl<'a, T: ?Sized> fmt::Debug for RwLockMappedWriteGuard<'a, T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> fmt::Display for RwLockMappedWriteGuard<'a, T>
+where
+ T: fmt::Display,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> Drop for RwLockMappedWriteGuard<'a, T> {
+ fn drop(&mut self) {
+ self.s.release(self.permits_acquired as usize);
+ }
+}
diff --git a/src/sync/semaphore.rs b/src/sync/semaphore.rs
index bff5de9..af75042 100644
--- a/src/sync/semaphore.rs
+++ b/src/sync/semaphore.rs
@@ -14,6 +14,13 @@ use std::sync::Arc;
/// available, `acquire` (asynchronously) waits until an outstanding permit is
/// dropped. At this point, the freed permit is assigned to the caller.
///
+/// This `Semaphore` is fair, which means that permits are given out in the order
+/// they were requested. This fairness is also applied when `acquire_many` gets
+/// involved, so if a call to `acquire_many` at the front of the queue requests
+/// more permits than currently available, this can prevent a call to `acquire`
+/// from completing, even if the semaphore has enough permits to complete the call
+/// to `acquire`.
+///
/// To use the `Semaphore` in a poll function, you can use the [`PollSemaphore`]
/// utility.
///
@@ -143,7 +150,7 @@ impl Semaphore {
}
}
- /// Tries to acquire n permits from the semaphore.
+ /// Tries to acquire `n` permits from the semaphore.
///
/// If the semaphore has been closed, this returns a [`TryAcquireError::Closed`]
/// and a [`TryAcquireError::NoPermits`] if there are no permits left. Otherwise,
@@ -180,6 +187,27 @@ impl Semaphore {
})
}
+ /// Acquires `n` permits from the semaphore.
+ ///
+ /// The semaphore must be wrapped in an [`Arc`] to call this method.
+ /// If the semaphore has been closed, this returns an [`AcquireError`].
+ /// Otherwise, this returns an [`OwnedSemaphorePermit`] representing the
+ /// acquired permit.
+ ///
+ /// [`Arc`]: std::sync::Arc
+ /// [`AcquireError`]: crate::sync::AcquireError
+ /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit
+ pub async fn acquire_many_owned(
+ self: Arc<Self>,
+ n: u32,
+ ) -> Result<OwnedSemaphorePermit, AcquireError> {
+ self.ll_sem.acquire(n).await?;
+ Ok(OwnedSemaphorePermit {
+ sem: self,
+ permits: n,
+ })
+ }
+
/// Tries to acquire a permit from the semaphore.
///
/// The semaphore must be wrapped in an [`Arc`] to call this method. If
@@ -202,6 +230,31 @@ impl Semaphore {
}
}
+ /// Tries to acquire `n` permits from the semaphore.
+ ///
+ /// The semaphore must be wrapped in an [`Arc`] to call this method. If
+ /// the semaphore has been closed, this returns a [`TryAcquireError::Closed`]
+ /// and a [`TryAcquireError::NoPermits`] if there are no permits left.
+ /// Otherwise, this returns an [`OwnedSemaphorePermit`] representing the
+ /// acquired permit.
+ ///
+ /// [`Arc`]: std::sync::Arc
+ /// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed
+ /// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits
+ /// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit
+ pub fn try_acquire_many_owned(
+ self: Arc<Self>,
+ n: u32,
+ ) -> Result<OwnedSemaphorePermit, TryAcquireError> {
+ match self.ll_sem.try_acquire(n) {
+ Ok(_) => Ok(OwnedSemaphorePermit {
+ sem: self,
+ permits: n,
+ }),
+ Err(e) => Err(e),
+ }
+ }
+
/// Closes the semaphore.
///
/// This prevents the semaphore from issuing new permits and notifies all pending waiters.
@@ -234,6 +287,11 @@ impl Semaphore {
pub fn close(&self) {
self.ll_sem.close();
}
+
+ /// Returns `true` if the semaphore is closed.
+ pub fn is_closed(&self) -> bool {
+ self.ll_sem.is_closed()
+ }
}
impl<'a> SemaphorePermit<'a> {
diff --git a/src/sync/tests/loom_rwlock.rs b/src/sync/tests/loom_rwlock.rs
index 2834a26..4b5cc7e 100644
--- a/src/sync/tests/loom_rwlock.rs
+++ b/src/sync/tests/loom_rwlock.rs
@@ -22,7 +22,7 @@ fn concurrent_write() {
let rwclone = rwlock.clone();
let t2 = thread::spawn(move || {
block_on(async {
- let mut guard = rwclone.write().await;
+ let mut guard = rwclone.write_owned().await;
*guard += 5;
});
});
@@ -53,7 +53,7 @@ fn concurrent_read_write() {
let rwclone = rwlock.clone();
let t2 = thread::spawn(move || {
block_on(async {
- let mut guard = rwclone.write().await;
+ let mut guard = rwclone.write_owned().await;
*guard += 5;
});
});
@@ -67,6 +67,12 @@ fn concurrent_read_write() {
});
});
+ {
+ let guard = block_on(rwlock.clone().read_owned());
+ // at this point, the value in the lock may be either 0, 5, or 10
+ assert!(*guard == 0 || *guard == 5 || *guard == 10);
+ }
+
t1.join().expect("thread 1 write should not panic");
t2.join().expect("thread 2 write should not panic");
t3.join().expect("thread 3 read should not panic");
diff --git a/src/sync/watch.rs b/src/sync/watch.rs
index 5590a75..bf6f0ac 100644
--- a/src/sync/watch.rs
+++ b/src/sync/watch.rs
@@ -1,3 +1,5 @@
+#![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
+
//! A single-producer, multi-consumer channel that only retains the *last* sent
//! value.
//!
@@ -51,7 +53,7 @@
//! [`Sender::is_closed`]: crate::sync::watch::Sender::is_closed
//! [`Sender::closed`]: crate::sync::watch::Sender::closed
-use crate::sync::Notify;
+use crate::sync::notify::Notify;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::atomic::Ordering::{Relaxed, SeqCst};
@@ -198,6 +200,14 @@ pub fn channel<T>(init: T) -> (Sender<T>, Receiver<T>) {
}
impl<T> Receiver<T> {
+ fn from_shared(version: usize, shared: Arc<Shared<T>>) -> Self {
+ // No synchronization necessary as this is only used as a counter and
+ // not memory access.
+ shared.ref_count_rx.fetch_add(1, Relaxed);
+
+ Self { version, shared }
+ }
+
/// Returns a reference to the most recently sent value
///
/// Outstanding borrows hold a read lock. This means that long lived borrows
@@ -260,6 +270,12 @@ impl<T> Receiver<T> {
// loop around again in case the wake-up was spurious
}
}
+
+ cfg_process_driver! {
+ pub(crate) fn try_has_changed(&mut self) -> Option<Result<(), error::RecvError>> {
+ maybe_changed(&self.shared, &mut self.version)
+ }
+ }
}
fn maybe_changed<T>(
@@ -289,11 +305,7 @@ impl<T> Clone for Receiver<T> {
let version = self.version;
let shared = self.shared.clone();
- // No synchronization necessary as this is only used as a counter and
- // not memory access.
- shared.ref_count_rx.fetch_add(1, Relaxed);
-
- Receiver { version, shared }
+ Self::from_shared(version, shared)
}
}
@@ -396,6 +408,15 @@ impl<T> Sender<T> {
notified.await;
debug_assert_eq!(0, self.shared.ref_count_rx.load(Relaxed));
}
+
+ cfg_signal_internal! {
+ pub(crate) fn subscribe(&self) -> Receiver<T> {
+ let shared = self.shared.clone();
+ let version = shared.version.load(SeqCst);
+
+ Receiver::from_shared(version, shared)
+ }
+ }
}
impl<T> Drop for Sender<T> {
diff --git a/src/task/local.rs b/src/task/local.rs
index ee11511..64f1ac5 100644
--- a/src/task/local.rs
+++ b/src/task/local.rs
@@ -661,7 +661,7 @@ impl Shared {
}
fn ptr_eq(&self, other: &Shared) -> bool {
- self as *const _ == other as *const _
+ std::ptr::eq(self, other)
}
}
diff --git a/src/task/mod.rs b/src/task/mod.rs
index 5dc5e72..abae818 100644
--- a/src/task/mod.rs
+++ b/src/task/mod.rs
@@ -209,11 +209,66 @@
//! # }
//! ```
//!
+//! ### Cooperative scheduling
+//!
+//! A single call to [`poll`] on a top-level task may potentially do a lot of
+//! work before it returns `Poll::Pending`. If a task runs for a long period of
+//! time without yielding back to the executor, it can starve other tasks
+//! waiting on that executor to execute them, or drive underlying resources.
+//! Since Rust does not have a runtime, it is difficult to forcibly preempt a
+//! long-running task. Instead, this module provides an opt-in mechanism for
+//! futures to collaborate with the executor to avoid starvation.
+//!
+//! Consider a future like this one:
+//!
+//! ```
+//! # use tokio_stream::{Stream, StreamExt};
+//! async fn drop_all<I: Stream + Unpin>(mut input: I) {
+//! while let Some(_) = input.next().await {}
+//! }
+//! ```
+//!
+//! It may look harmless, but consider what happens under heavy load if the
+//! input stream is _always_ ready. If we spawn `drop_all`, the task will never
+//! yield, and will starve other tasks and resources on the same executor.
+//!
+//! To account for this, Tokio has explicit yield points in a number of library
+//! functions, which force tasks to return to the executor periodically.
+//!
+//!
+//! #### unconstrained
+//!
+//! If necessary, [`task::unconstrained`] lets you opt a future out of Tokio's cooperative
+//! scheduling. When a future is wrapped with `unconstrained`, it will never be forced to yield to
+//! Tokio. For example:
+//!
+//! ```
+//! # #[tokio::main]
+//! # async fn main() {
+//! use tokio::{task, sync::mpsc};
+//!
+//! let fut = async {
+//! let (tx, mut rx) = mpsc::unbounded_channel();
+//!
+//! for i in 0..1000 {
+//! let _ = tx.send(());
+//! // This will always be ready. If coop was in effect, this code would be forced to yield
+//! // periodically. However, if left unconstrained, then this code will never yield.
+//! rx.recv().await;
+//! }
+//! };
+//!
+//! task::unconstrained(fut).await;
+//! # }
+//! ```
+//!
//! [`task::spawn_blocking`]: crate::task::spawn_blocking
//! [`task::block_in_place`]: crate::task::block_in_place
//! [rt-multi-thread]: ../runtime/index.html#threaded-scheduler
//! [`task::yield_now`]: crate::task::yield_now()
//! [`thread::yield_now`]: std::thread::yield_now
+//! [`task::unconstrained`]: crate::task::unconstrained()
+//! [`poll`]: method@std::future::Future::poll
cfg_rt! {
pub use crate::runtime::task::{JoinError, JoinHandle};
@@ -236,4 +291,7 @@ cfg_rt! {
mod task_local;
pub use task_local::LocalKey;
+
+ mod unconstrained;
+ pub use unconstrained::{unconstrained, Unconstrained};
}
diff --git a/src/task/task_local.rs b/src/task/task_local.rs
index bc2e54a..6571ffd 100644
--- a/src/task/task_local.rs
+++ b/src/task/task_local.rs
@@ -127,6 +127,35 @@ impl<T: 'static> LocalKey<T> {
.await
}
+ /// Sets a value `T` as the task-local value for the closure `F`.
+ ///
+ /// On completion of `sync_scope`, the task-local will be dropped.
+ ///
+ /// ### Examples
+ ///
+ /// ```
+ /// # async fn dox() {
+ /// tokio::task_local! {
+ /// static NUMBER: u32;
+ /// }
+ ///
+ /// NUMBER.sync_scope(1, || {
+ /// println!("task local value: {}", NUMBER.get());
+ /// });
+ /// # }
+ /// ```
+ pub fn sync_scope<F, R>(&'static self, value: T, f: F) -> R
+ where
+ F: FnOnce() -> R,
+ {
+ let mut scope = TaskLocalFuture {
+ local: &self,
+ slot: Some(value),
+ future: (),
+ };
+ Pin::new(&mut scope).with_task(|_| f())
+ }
+
/// Accesses the current task-local and runs the provided closure.
///
/// # Panics
@@ -145,7 +174,7 @@ impl<T: 'static> LocalKey<T> {
/// Accesses the current task-local and runs the provided closure.
///
- /// If the task-local with the accociated key is not present, this
+ /// If the task-local with the associated key is not present, this
/// method will return an `AccessError`. For a panicking variant,
/// see `with`.
pub fn try_with<F, R>(&'static self, f: F) -> Result<R, AccessError>
@@ -185,10 +214,8 @@ pin_project! {
}
}
-impl<T: 'static, F: Future> Future for TaskLocalFuture<T, F> {
- type Output = F::Output;
-
- fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+impl<T: 'static, F> TaskLocalFuture<T, F> {
+ fn with_task<F2: FnOnce(Pin<&mut F>) -> R, R>(self: Pin<&mut Self>, f: F2) -> R {
struct Guard<'a, T: 'static> {
local: &'static LocalKey<T>,
slot: &'a mut Option<T>,
@@ -213,7 +240,15 @@ impl<T: 'static, F: Future> Future for TaskLocalFuture<T, F> {
local: *project.local,
};
- project.future.poll(cx)
+ f(project.future)
+ }
+}
+
+impl<T: 'static, F: Future> Future for TaskLocalFuture<T, F> {
+ type Output = F::Output;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ self.with_task(|f| f.poll(cx))
}
}
diff --git a/src/task/unconstrained.rs b/src/task/unconstrained.rs
new file mode 100644
index 0000000..4a62f81
--- /dev/null
+++ b/src/task/unconstrained.rs
@@ -0,0 +1,43 @@
+use pin_project_lite::pin_project;
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
+
+pin_project! {
+ /// Future for the [`unconstrained`](unconstrained) method.
+ #[must_use = "Unconstrained does nothing unless polled"]
+ pub struct Unconstrained<F> {
+ #[pin]
+ inner: F,
+ }
+}
+
+impl<F> Future for Unconstrained<F>
+where
+ F: Future,
+{
+ type Output = <F as Future>::Output;
+
+ cfg_coop! {
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let inner = self.project().inner;
+ crate::coop::with_unconstrained(|| inner.poll(cx))
+ }
+ }
+
+ cfg_not_coop! {
+ fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ let inner = self.project().inner;
+ inner.poll(cx)
+ }
+ }
+}
+
+/// Turn off cooperative scheduling for a future. The future will never be forced to yield by
+/// Tokio. Using this exposes your service to starvation if the unconstrained future never yields
+/// otherwise.
+///
+/// See also the usage example in the [task module](index.html#unconstrained).
+pub fn unconstrained<F>(inner: F) -> Unconstrained<F> {
+ Unconstrained { inner }
+}
diff --git a/src/time/driver/entry.rs b/src/time/driver/entry.rs
index 11366d2..e630fa8 100644
--- a/src/time/driver/entry.rs
+++ b/src/time/driver/entry.rs
@@ -543,6 +543,10 @@ impl TimerEntry {
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), super::Error>> {
+ if self.driver.is_shutdown() {
+ panic!("{}", crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR);
+ }
+
if let Some(deadline) = self.initial_deadline {
self.as_mut().reset(deadline);
}
diff --git a/src/time/driver/handle.rs b/src/time/driver/handle.rs
index e934b56..9a05a54 100644
--- a/src/time/driver/handle.rs
+++ b/src/time/driver/handle.rs
@@ -1,4 +1,4 @@
-use crate::loom::sync::{Arc, Mutex};
+use crate::loom::sync::Arc;
use crate::time::driver::ClockTime;
use std::fmt;
@@ -6,13 +6,13 @@ use std::fmt;
#[derive(Clone)]
pub(crate) struct Handle {
time_source: ClockTime,
- inner: Arc<Mutex<super::Inner>>,
+ inner: Arc<super::Inner>,
}
impl Handle {
/// Creates a new timer `Handle` from a shared `Inner` timer state.
- pub(super) fn new(inner: Arc<Mutex<super::Inner>>) -> Self {
- let time_source = inner.lock().time_source.clone();
+ pub(super) fn new(inner: Arc<super::Inner>) -> Self {
+ let time_source = inner.state.lock().time_source.clone();
Handle { time_source, inner }
}
@@ -21,9 +21,14 @@ impl Handle {
&self.time_source
}
- /// Locks the driver's inner structure
- pub(super) fn lock(&self) -> crate::loom::sync::MutexGuard<'_, super::Inner> {
- self.inner.lock()
+ /// Access the driver's inner structure
+ pub(super) fn get(&self) -> &super::Inner {
+ &*self.inner
+ }
+
+ // Check whether the driver has been shutdown
+ pub(super) fn is_shutdown(&self) -> bool {
+ self.inner.is_shutdown()
}
}
@@ -43,7 +48,7 @@ cfg_rt! {
/// since the function is executed outside of the runtime.
/// Whereas `rt.block_on(async {delay_for(...).await})` doesn't panic.
/// And this is because wrapping the function on an async makes it lazy,
- /// and so gets executed inside the runtime successfuly without
+ /// and so gets executed inside the runtime successfully without
/// panicking.
pub(crate) fn current() -> Self {
crate::runtime::context::time_handle()
@@ -68,7 +73,7 @@ cfg_not_rt! {
/// since the function is executed outside of the runtime.
/// Whereas `rt.block_on(async {delay_for(...).await})` doesn't
/// panic. And this is because wrapping the function on an async makes it
- /// lazy, and so outside executed inside the runtime successfuly without
+ /// lazy, and so outside executed inside the runtime successfully without
/// panicking.
pub(crate) fn current() -> Self {
panic!(crate::util::error::CONTEXT_MISSING_ERROR)
diff --git a/src/time/driver/mod.rs b/src/time/driver/mod.rs
index 615307e..3eb1004 100644
--- a/src/time/driver/mod.rs
+++ b/src/time/driver/mod.rs
@@ -16,6 +16,7 @@ mod wheel;
pub(super) mod sleep;
+use crate::loom::sync::atomic::{AtomicBool, Ordering};
use crate::loom::sync::{Arc, Mutex};
use crate::park::{Park, Unpark};
use crate::time::error::Error;
@@ -86,7 +87,7 @@ pub(crate) struct Driver<P: Park + 'static> {
time_source: ClockTime,
/// Shared state
- inner: Handle,
+ handle: Handle,
/// Parker to delegate to
park: P,
@@ -132,7 +133,16 @@ impl ClockTime {
}
/// Timer state shared between `Driver`, `Handle`, and `Registration`.
-pub(self) struct Inner {
+struct Inner {
+ // The state is split like this so `Handle` can access `is_shutdown` without locking the mutex
+ pub(super) state: Mutex<InnerState>,
+
+ /// True if the driver is being shutdown
+ pub(super) is_shutdown: AtomicBool,
+}
+
+/// Timer state which must be protected by a `Mutex`
+struct InnerState {
/// Timing backend in use
time_source: ClockTime,
@@ -145,9 +155,6 @@ pub(self) struct Inner {
/// Timer wheel
wheel: wheel::Wheel,
- /// True if the driver is being shutdown
- is_shutdown: bool,
-
/// Unparker that can be used to wake the time driver
unpark: Box<dyn Unpark>,
}
@@ -169,7 +176,7 @@ where
Driver {
time_source,
- inner: Handle::new(Arc::new(Mutex::new(inner))),
+ handle: Handle::new(Arc::new(inner)),
park,
}
}
@@ -181,15 +188,15 @@ where
/// `with_default`, setting the timer as the default timer for the execution
/// context.
pub(crate) fn handle(&self) -> Handle {
- self.inner.clone()
+ self.handle.clone()
}
fn park_internal(&mut self, limit: Option<Duration>) -> Result<(), P::Error> {
let clock = &self.time_source.clock;
- let mut lock = self.inner.lock();
+ let mut lock = self.handle.get().state.lock();
- assert!(!lock.is_shutdown);
+ assert!(!self.handle.is_shutdown());
let next_wake = lock.wheel.next_expiration_time();
lock.next_wake =
@@ -237,7 +244,7 @@ where
}
// Process pending timers after waking up
- self.inner.process();
+ self.handle.process();
Ok(())
}
@@ -255,7 +262,7 @@ impl Handle {
let mut waker_list: [Option<Waker>; 32] = Default::default();
let mut waker_idx = 0;
- let mut lock = self.lock();
+ let mut lock = self.get().lock();
assert!(now >= lock.elapsed);
@@ -278,7 +285,7 @@ impl Handle {
waker_idx = 0;
- lock = self.lock();
+ lock = self.get().lock();
}
}
}
@@ -309,7 +316,7 @@ impl Handle {
/// `add_entry` must not be called concurrently.
pub(self) unsafe fn clear_entry(&self, entry: NonNull<TimerShared>) {
unsafe {
- let mut lock = self.lock();
+ let mut lock = self.get().lock();
if entry.as_ref().might_be_registered() {
lock.wheel.remove(entry);
@@ -327,7 +334,7 @@ impl Handle {
/// the `TimerEntry`)
pub(self) unsafe fn reregister(&self, new_tick: u64, entry: NonNull<TimerShared>) {
let waker = unsafe {
- let mut lock = self.lock();
+ let mut lock = self.get().lock();
// We may have raced with a firing/deregistration, so check before
// deregistering.
@@ -338,7 +345,7 @@ impl Handle {
// Now that we have exclusive control of this entry, mint a handle to reinsert it.
let entry = entry.as_ref().handle();
- if lock.is_shutdown {
+ if self.is_shutdown() {
unsafe { entry.fire(Err(crate::time::error::Error::shutdown())) }
} else {
entry.set_expiration(new_tick);
@@ -396,19 +403,15 @@ where
}
fn shutdown(&mut self) {
- let mut lock = self.inner.lock();
-
- if lock.is_shutdown {
+ if self.handle.is_shutdown() {
return;
}
- lock.is_shutdown = true;
-
- drop(lock);
+ self.handle.get().is_shutdown.store(true, Ordering::SeqCst);
// Advance time forward to the end of time.
- self.inner.process_at_time(u64::MAX);
+ self.handle.process_at_time(u64::MAX);
self.park.shutdown();
}
@@ -428,14 +431,26 @@ where
impl Inner {
pub(self) fn new(time_source: ClockTime, unpark: Box<dyn Unpark>) -> Self {
Inner {
- time_source,
- elapsed: 0,
- next_wake: None,
- unpark,
- wheel: wheel::Wheel::new(),
- is_shutdown: false,
+ state: Mutex::new(InnerState {
+ time_source,
+ elapsed: 0,
+ next_wake: None,
+ unpark,
+ wheel: wheel::Wheel::new(),
+ }),
+ is_shutdown: AtomicBool::new(false),
}
}
+
+ /// Locks the driver's inner structure
+ pub(super) fn lock(&self) -> crate::loom::sync::MutexGuard<'_, InnerState> {
+ self.state.lock()
+ }
+
+ // Check whether the driver has been shutdown
+ pub(super) fn is_shutdown(&self) -> bool {
+ self.is_shutdown.load(Ordering::SeqCst)
+ }
}
impl fmt::Debug for Inner {
diff --git a/src/time/driver/sleep.rs b/src/time/driver/sleep.rs
index 2438f14..8658813 100644
--- a/src/time/driver/sleep.rs
+++ b/src/time/driver/sleep.rs
@@ -16,6 +16,8 @@ use std::task::{self, Poll};
///
/// Canceling a sleep instance is done by dropping the returned future. No additional
/// cleanup work is required.
+// Alias for old name in 0.x
+#[cfg_attr(docsrs, doc(alias = "delay_until"))]
pub fn sleep_until(deadline: Instant) -> Sleep {
Sleep::new_timeout(deadline)
}
@@ -53,8 +55,13 @@ pub fn sleep_until(deadline: Instant) -> Sleep {
/// ```
///
/// [`interval`]: crate::time::interval()
+// Alias for old name in 0.x
+#[cfg_attr(docsrs, doc(alias = "delay_for"))]
pub fn sleep(duration: Duration) -> Sleep {
- sleep_until(Instant::now() + duration)
+ match Instant::now().checked_add(duration) {
+ Some(deadline) => sleep_until(deadline),
+ None => sleep_until(Instant::far_future()),
+ }
}
pin_project! {
@@ -145,6 +152,8 @@ pin_project! {
///
/// [`select!`]: ../macro.select.html
/// [`tokio::pin!`]: ../macro.pin.html
+ // Alias for old name in 0.2
+ #[cfg_attr(docsrs, doc(alias = "Delay"))]
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Sleep {
@@ -164,6 +173,10 @@ impl Sleep {
Sleep { deadline, entry }
}
+ pub(crate) fn far_future() -> Sleep {
+ Self::new_timeout(Instant::far_future())
+ }
+
/// Returns the instant at which the future will complete.
pub fn deadline(&self) -> Instant {
self.deadline
@@ -185,7 +198,7 @@ impl Sleep {
/// completed.
///
/// To call this method, you will usually combine the call with
- /// [`Pin::as_mut`], which lets you call the method with consuming the
+ /// [`Pin::as_mut`], which lets you call the method without consuming the
/// `Sleep` itself.
///
/// # Example
diff --git a/src/time/driver/tests/mod.rs b/src/time/driver/tests/mod.rs
index 8ae4a84..7c5cf1f 100644
--- a/src/time/driver/tests/mod.rs
+++ b/src/time/driver/tests/mod.rs
@@ -3,7 +3,7 @@ use std::{task::Context, time::Duration};
#[cfg(not(loom))]
use futures::task::noop_waker_ref;
-use crate::loom::sync::{Arc, Mutex};
+use crate::loom::sync::Arc;
use crate::loom::thread;
use crate::{
loom::sync::atomic::{AtomicBool, Ordering},
@@ -45,7 +45,7 @@ fn single_timer() {
let time_source = super::ClockTime::new(clock.clone());
let inner = super::Inner::new(time_source.clone(), MockUnpark::mock());
- let handle = Handle::new(Arc::new(Mutex::new(inner)));
+ let handle = Handle::new(Arc::new(inner));
let handle_ = handle.clone();
let jh = thread::spawn(move || {
@@ -76,7 +76,7 @@ fn drop_timer() {
let time_source = super::ClockTime::new(clock.clone());
let inner = super::Inner::new(time_source.clone(), MockUnpark::mock());
- let handle = Handle::new(Arc::new(Mutex::new(inner)));
+ let handle = Handle::new(Arc::new(inner));
let handle_ = handle.clone();
let jh = thread::spawn(move || {
@@ -107,7 +107,7 @@ fn change_waker() {
let time_source = super::ClockTime::new(clock.clone());
let inner = super::Inner::new(time_source.clone(), MockUnpark::mock());
- let handle = Handle::new(Arc::new(Mutex::new(inner)));
+ let handle = Handle::new(Arc::new(inner));
let handle_ = handle.clone();
let jh = thread::spawn(move || {
@@ -142,7 +142,7 @@ fn reset_future() {
let time_source = super::ClockTime::new(clock.clone());
let inner = super::Inner::new(time_source.clone(), MockUnpark::mock());
- let handle = Handle::new(Arc::new(Mutex::new(inner)));
+ let handle = Handle::new(Arc::new(inner));
let handle_ = handle.clone();
let finished_early_ = finished_early.clone();
@@ -191,7 +191,7 @@ fn poll_process_levels() {
let time_source = super::ClockTime::new(clock.clone());
let inner = super::Inner::new(time_source, MockUnpark::mock());
- let handle = Handle::new(Arc::new(Mutex::new(inner)));
+ let handle = Handle::new(Arc::new(inner));
let mut entries = vec![];
@@ -232,7 +232,7 @@ fn poll_process_levels_targeted() {
let time_source = super::ClockTime::new(clock.clone());
let inner = super::Inner::new(time_source, MockUnpark::mock());
- let handle = Handle::new(Arc::new(Mutex::new(inner)));
+ let handle = Handle::new(Arc::new(inner));
let e1 = TimerEntry::new(&handle, clock.now() + Duration::from_millis(193));
pin!(e1);
diff --git a/src/time/instant.rs b/src/time/instant.rs
index f4d6eac..1f8e663 100644
--- a/src/time/instant.rs
+++ b/src/time/instant.rs
@@ -54,6 +54,14 @@ impl Instant {
Instant { std }
}
+ pub(crate) fn far_future() -> Instant {
+ // Roughly 30 years from now.
+ // API does not provide a way to obtain max `Instant`
+ // or convert specific date in the future to instant.
+ // 1000 years overflows on macOS, 100 years overflows on FreeBSD.
+ Self::now() + Duration::from_secs(86400 * 365 * 30)
+ }
+
/// Convert the value into a `std::time::Instant`.
pub fn into_std(self) -> std::time::Instant {
self.std
diff --git a/src/time/mod.rs b/src/time/mod.rs
index 8aaf9c1..98bb2af 100644
--- a/src/time/mod.rs
+++ b/src/time/mod.rs
@@ -24,10 +24,8 @@
//! Wait 100ms and print "100 ms have elapsed"
//!
//! ```
-//! use tokio::time::sleep;
-//!
//! use std::time::Duration;
-//!
+//! use tokio::time::sleep;
//!
//! #[tokio::main]
//! async fn main() {
@@ -36,7 +34,7 @@
//! }
//! ```
//!
-//! Require that an operation takes no more than 300ms.
+//! Require that an operation takes no more than 1s.
//!
//! ```
//! use tokio::time::{timeout, Duration};
@@ -56,10 +54,10 @@
//!
//! A simple example using [`interval`] to execute a task every two seconds.
//!
-//! The difference between [`interval`] and [`sleep`] is that an
-//! [`interval`] measures the time since the last tick, which means that
-//! `.tick().await` may wait for a shorter time than the duration specified
-//! for the interval if some time has passed between calls to `.tick().await`.
+//! The difference between [`interval`] and [`sleep`] is that an [`interval`]
+//! measures the time since the last tick, which means that `.tick().await`
+//! may wait for a shorter time than the duration specified for the interval
+//! if some time has passed between calls to `.tick().await`.
//!
//! If the tick in the example below was replaced with [`sleep`], the task
//! would only be executed once every three seconds, and not every two
@@ -75,11 +73,9 @@
//!
//! #[tokio::main]
//! async fn main() {
-//! let interval = time::interval(time::Duration::from_secs(2));
-//! tokio::pin!(interval);
-//!
+//! let mut interval = time::interval(time::Duration::from_secs(2));
//! for _i in 0..5 {
-//! interval.as_mut().tick().await;
+//! interval.tick().await;
//! task_that_takes_a_second().await;
//! }
//! }
diff --git a/src/time/timeout.rs b/src/time/timeout.rs
index 9d15a72..61964ad 100644
--- a/src/time/timeout.rs
+++ b/src/time/timeout.rs
@@ -49,7 +49,11 @@ pub fn timeout<T>(duration: Duration, future: T) -> Timeout<T>
where
T: Future,
{
- let delay = Sleep::new_timeout(Instant::now() + duration);
+ let deadline = Instant::now().checked_add(duration);
+ let delay = match deadline {
+ Some(deadline) => Sleep::new_timeout(deadline),
+ None => Sleep::far_future(),
+ };
Timeout::new_with_delay(future, delay)
}
diff --git a/src/util/error.rs b/src/util/error.rs
index 518cb2c..0e52364 100644
--- a/src/util/error.rs
+++ b/src/util/error.rs
@@ -1,3 +1,9 @@
/// Error string explaining that the Tokio context hasn't been instantiated.
pub(crate) const CONTEXT_MISSING_ERROR: &str =
"there is no reactor running, must be called from the context of a Tokio 1.x runtime";
+
+// some combinations of features might not use this
+#[allow(dead_code)]
+/// Error string explaining that the Tokio context is shutting down and cannot drive timers.
+pub(crate) const RUNTIME_SHUTTING_DOWN_ERROR: &str =
+ "A Tokio 1.x context was found, but it is being shutdown.";
diff --git a/src/util/linked_list.rs b/src/util/linked_list.rs
index 4681276..dd00e14 100644
--- a/src/util/linked_list.rs
+++ b/src/util/linked_list.rs
@@ -6,10 +6,11 @@
//! structure's APIs are `unsafe` as they require the caller to ensure the
//! specified node is actually contained by the list.
+use core::cell::UnsafeCell;
use core::fmt;
-use core::marker::PhantomData;
+use core::marker::{PhantomData, PhantomPinned};
use core::mem::ManuallyDrop;
-use core::ptr::NonNull;
+use core::ptr::{self, NonNull};
/// An intrusive linked list.
///
@@ -60,11 +61,40 @@ pub(crate) unsafe trait Link {
/// Previous / next pointers
pub(crate) struct Pointers<T> {
+ inner: UnsafeCell<PointersInner<T>>,
+}
+/// We do not want the compiler to put the `noalias` attribute on mutable
+/// references to this type, so the type has been made `!Unpin` with a
+/// `PhantomPinned` field.
+///
+/// Additionally, we never access the `prev` or `next` fields directly, as any
+/// such access would implicitly involve the creation of a reference to the
+/// field, which we want to avoid since the fields are not `!Unpin`, and would
+/// hence be given the `noalias` attribute if we were to do such an access.
+/// As an alternative to accessing the fields directly, the `Pointers` type
+/// provides getters and setters for the two fields, and those are implemented
+/// using raw pointer casts and offsets, which is valid since the struct is
+/// #[repr(C)].
+///
+/// See this link for more information:
+/// https://github.com/rust-lang/rust/pull/82834
+#[repr(C)]
+struct PointersInner<T> {
/// The previous node in the list. null if there is no previous node.
+ ///
+ /// This field is accessed through pointer manipulation, so it is not dead code.
+ #[allow(dead_code)]
prev: Option<NonNull<T>>,
/// The next node in the list. null if there is no previous node.
+ ///
+ /// This field is accessed through pointer manipulation, so it is not dead code.
+ #[allow(dead_code)]
next: Option<NonNull<T>>,
+
+ /// This type is !Unpin due to the heuristic from:
+ /// https://github.com/rust-lang/rust/pull/82834
+ _pin: PhantomPinned,
}
unsafe impl<T: Send> Send for Pointers<T> {}
@@ -91,11 +121,11 @@ impl<L: Link> LinkedList<L, L::Target> {
let ptr = L::as_raw(&*val);
assert_ne!(self.head, Some(ptr));
unsafe {
- L::pointers(ptr).as_mut().next = self.head;
- L::pointers(ptr).as_mut().prev = None;
+ L::pointers(ptr).as_mut().set_next(self.head);
+ L::pointers(ptr).as_mut().set_prev(None);
if let Some(head) = self.head {
- L::pointers(head).as_mut().prev = Some(ptr);
+ L::pointers(head).as_mut().set_prev(Some(ptr));
}
self.head = Some(ptr);
@@ -111,22 +141,22 @@ impl<L: Link> LinkedList<L, L::Target> {
pub(crate) fn pop_back(&mut self) -> Option<L::Handle> {
unsafe {
let last = self.tail?;
- self.tail = L::pointers(last).as_ref().prev;
+ self.tail = L::pointers(last).as_ref().get_prev();
- if let Some(prev) = L::pointers(last).as_ref().prev {
- L::pointers(prev).as_mut().next = None;
+ if let Some(prev) = L::pointers(last).as_ref().get_prev() {
+ L::pointers(prev).as_mut().set_next(None);
} else {
self.head = None
}
- L::pointers(last).as_mut().prev = None;
- L::pointers(last).as_mut().next = None;
+ L::pointers(last).as_mut().set_prev(None);
+ L::pointers(last).as_mut().set_next(None);
Some(L::from_raw(last))
}
}
- /// Returns whether the linked list doesn not contain any node
+ /// Returns whether the linked list does not contain any node
pub(crate) fn is_empty(&self) -> bool {
if self.head.is_some() {
return false;
@@ -143,31 +173,35 @@ impl<L: Link> LinkedList<L, L::Target> {
/// The caller **must** ensure that `node` is currently contained by
/// `self` or not contained by any other list.
pub(crate) unsafe fn remove(&mut self, node: NonNull<L::Target>) -> Option<L::Handle> {
- if let Some(prev) = L::pointers(node).as_ref().prev {
- debug_assert_eq!(L::pointers(prev).as_ref().next, Some(node));
- L::pointers(prev).as_mut().next = L::pointers(node).as_ref().next;
+ if let Some(prev) = L::pointers(node).as_ref().get_prev() {
+ debug_assert_eq!(L::pointers(prev).as_ref().get_next(), Some(node));
+ L::pointers(prev)
+ .as_mut()
+ .set_next(L::pointers(node).as_ref().get_next());
} else {
if self.head != Some(node) {
return None;
}
- self.head = L::pointers(node).as_ref().next;
+ self.head = L::pointers(node).as_ref().get_next();
}
- if let Some(next) = L::pointers(node).as_ref().next {
- debug_assert_eq!(L::pointers(next).as_ref().prev, Some(node));
- L::pointers(next).as_mut().prev = L::pointers(node).as_ref().prev;
+ if let Some(next) = L::pointers(node).as_ref().get_next() {
+ debug_assert_eq!(L::pointers(next).as_ref().get_prev(), Some(node));
+ L::pointers(next)
+ .as_mut()
+ .set_prev(L::pointers(node).as_ref().get_prev());
} else {
// This might be the last item in the list
if self.tail != Some(node) {
return None;
}
- self.tail = L::pointers(node).as_ref().prev;
+ self.tail = L::pointers(node).as_ref().get_prev();
}
- L::pointers(node).as_mut().next = None;
- L::pointers(node).as_mut().prev = None;
+ L::pointers(node).as_mut().set_next(None);
+ L::pointers(node).as_mut().set_prev(None);
Some(L::from_raw(node))
}
@@ -224,7 +258,7 @@ cfg_rt_multi_thread! {
fn next(&mut self) -> Option<&'a T::Target> {
let curr = self.curr?;
// safety: the pointer references data contained by the list
- self.curr = unsafe { T::pointers(curr).as_ref() }.next;
+ self.curr = unsafe { T::pointers(curr).as_ref() }.get_next();
// safety: the value is still owned by the linked list.
Some(unsafe { &*curr.as_ptr() })
@@ -265,7 +299,7 @@ cfg_io_readiness! {
fn next(&mut self) -> Option<Self::Item> {
while let Some(curr) = self.curr {
// safety: the pointer references data contained by the list
- self.curr = unsafe { T::pointers(curr).as_ref() }.next;
+ self.curr = unsafe { T::pointers(curr).as_ref() }.get_next();
// safety: the value is still owned by the linked list.
if (self.filter)(unsafe { &mut *curr.as_ptr() }) {
@@ -284,17 +318,58 @@ impl<T> Pointers<T> {
/// Create a new set of empty pointers
pub(crate) fn new() -> Pointers<T> {
Pointers {
- prev: None,
- next: None,
+ inner: UnsafeCell::new(PointersInner {
+ prev: None,
+ next: None,
+ _pin: PhantomPinned,
+ }),
+ }
+ }
+
+ fn get_prev(&self) -> Option<NonNull<T>> {
+ // SAFETY: prev is the first field in PointersInner, which is #[repr(C)].
+ unsafe {
+ let inner = self.inner.get();
+ let prev = inner as *const Option<NonNull<T>>;
+ ptr::read(prev)
+ }
+ }
+ fn get_next(&self) -> Option<NonNull<T>> {
+ // SAFETY: next is the second field in PointersInner, which is #[repr(C)].
+ unsafe {
+ let inner = self.inner.get();
+ let prev = inner as *const Option<NonNull<T>>;
+ let next = prev.add(1);
+ ptr::read(next)
+ }
+ }
+
+ fn set_prev(&mut self, value: Option<NonNull<T>>) {
+ // SAFETY: prev is the first field in PointersInner, which is #[repr(C)].
+ unsafe {
+ let inner = self.inner.get();
+ let prev = inner as *mut Option<NonNull<T>>;
+ ptr::write(prev, value);
+ }
+ }
+ fn set_next(&mut self, value: Option<NonNull<T>>) {
+ // SAFETY: next is the second field in PointersInner, which is #[repr(C)].
+ unsafe {
+ let inner = self.inner.get();
+ let prev = inner as *mut Option<NonNull<T>>;
+ let next = prev.add(1);
+ ptr::write(next, value);
}
}
}
impl<T> fmt::Debug for Pointers<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let prev = self.get_prev();
+ let next = self.get_next();
f.debug_struct("Pointers")
- .field("prev", &self.prev)
- .field("next", &self.next)
+ .field("prev", &prev)
+ .field("next", &next)
.finish()
}
}
@@ -321,7 +396,7 @@ mod tests {
}
unsafe fn from_raw(ptr: NonNull<Entry>) -> Pin<&'a Entry> {
- Pin::new(&*ptr.as_ptr())
+ Pin::new_unchecked(&*ptr.as_ptr())
}
unsafe fn pointers(mut target: NonNull<Entry>) -> NonNull<Pointers<Entry>> {
@@ -361,8 +436,8 @@ mod tests {
macro_rules! assert_clean {
($e:ident) => {{
- assert!($e.pointers.next.is_none());
- assert!($e.pointers.prev.is_none());
+ assert!($e.pointers.get_next().is_none());
+ assert!($e.pointers.get_prev().is_none());
}};
}
@@ -460,8 +535,8 @@ mod tests {
assert_clean!(a);
assert_ptr_eq!(b, list.head);
- assert_ptr_eq!(c, b.pointers.next);
- assert_ptr_eq!(b, c.pointers.prev);
+ assert_ptr_eq!(c, b.pointers.get_next());
+ assert_ptr_eq!(b, c.pointers.get_prev());
let items = collect_list(&mut list);
assert_eq!([31, 7].to_vec(), items);
@@ -476,8 +551,8 @@ mod tests {
assert!(list.remove(ptr(&b)).is_some());
assert_clean!(b);
- assert_ptr_eq!(c, a.pointers.next);
- assert_ptr_eq!(a, c.pointers.prev);
+ assert_ptr_eq!(c, a.pointers.get_next());
+ assert_ptr_eq!(a, c.pointers.get_prev());
let items = collect_list(&mut list);
assert_eq!([31, 5].to_vec(), items);
@@ -493,7 +568,7 @@ mod tests {
assert!(list.remove(ptr(&c)).is_some());
assert_clean!(c);
- assert!(b.pointers.next.is_none());
+ assert!(b.pointers.get_next().is_none());
assert_ptr_eq!(b, list.tail);
let items = collect_list(&mut list);
@@ -516,8 +591,8 @@ mod tests {
assert_ptr_eq!(b, list.head);
assert_ptr_eq!(b, list.tail);
- assert!(b.pointers.next.is_none());
- assert!(b.pointers.prev.is_none());
+ assert!(b.pointers.get_next().is_none());
+ assert!(b.pointers.get_prev().is_none());
let items = collect_list(&mut list);
assert_eq!([7].to_vec(), items);
@@ -536,8 +611,8 @@ mod tests {
assert_ptr_eq!(a, list.head);
assert_ptr_eq!(a, list.tail);
- assert!(a.pointers.next.is_none());
- assert!(a.pointers.prev.is_none());
+ assert!(a.pointers.get_next().is_none());
+ assert!(a.pointers.get_prev().is_none());
let items = collect_list(&mut list);
assert_eq!([5].to_vec(), items);
diff --git a/src/util/wake.rs b/src/util/wake.rs
index e49f1e8..001577d 100644
--- a/src/util/wake.rs
+++ b/src/util/wake.rs
@@ -4,7 +4,7 @@ use std::ops::Deref;
use std::sync::Arc;
use std::task::{RawWaker, RawWakerVTable, Waker};
-/// Simplfied waking interface based on Arcs
+/// Simplified waking interface based on Arcs
pub(crate) trait Wake: Send + Sync {
/// Wake by value
fn wake(self: Arc<Self>);
diff --git a/tests/async_send_sync.rs b/tests/async_send_sync.rs
index 671fa4a..211c572 100644
--- a/tests/async_send_sync.rs
+++ b/tests/async_send_sync.rs
@@ -1,9 +1,12 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
+#![allow(clippy::type_complexity)]
use std::cell::Cell;
+use std::future::Future;
use std::io::{Cursor, SeekFrom};
use std::net::SocketAddr;
+use std::pin::Pin;
use std::rc::Rc;
use tokio::net::TcpStream;
use tokio::time::{Duration, Instant};
@@ -265,6 +268,28 @@ async_assert_fn!(tokio::sync::watch::Sender<u8>::closed(_): Send & Sync);
async_assert_fn!(tokio::sync::watch::Sender<Cell<u8>>::closed(_): !Send & !Sync);
async_assert_fn!(tokio::sync::watch::Sender<Rc<u8>>::closed(_): !Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<u8>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = u8> + Send + Sync>>): Send & Sync);
+async_assert_fn!(tokio::sync::OnceCell<u8>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = u8> + Send>>): Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<u8>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = u8>>>): !Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<Cell<u8>>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = Cell<u8>> + Send + Sync>>): !Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<Cell<u8>>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = Cell<u8>> + Send>>): !Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<Cell<u8>>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = Cell<u8>>>>): !Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<Rc<u8>>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = Rc<u8>> + Send + Sync>>): !Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<Rc<u8>>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = Rc<u8>> + Send>>): !Send & !Sync);
+async_assert_fn!(tokio::sync::OnceCell<Rc<u8>>::get_or_init(
+ _, fn() -> Pin<Box<dyn Future<Output = Rc<u8>>>>): !Send & !Sync);
+assert_value!(tokio::sync::OnceCell<u8>: Send & Sync);
+assert_value!(tokio::sync::OnceCell<Cell<u8>>: Send & !Sync);
+assert_value!(tokio::sync::OnceCell<Rc<u8>>: !Send & !Sync);
+
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSync<()>): Send & Sync);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFutureSend<()>): Send & !Sync);
async_assert_fn!(tokio::task::LocalKey<u32>::scope(_, u32, BoxFuture<()>): !Send & !Sync);
diff --git a/tests/io_copy_bidirectional.rs b/tests/io_copy_bidirectional.rs
new file mode 100644
index 0000000..17c0597
--- /dev/null
+++ b/tests/io_copy_bidirectional.rs
@@ -0,0 +1,128 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::time::Duration;
+use tokio::io::{self, copy_bidirectional, AsyncReadExt, AsyncWriteExt};
+use tokio::net::TcpStream;
+use tokio::task::JoinHandle;
+
+async fn make_socketpair() -> (TcpStream, TcpStream) {
+ let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
+ let addr = listener.local_addr().unwrap();
+ let connector = TcpStream::connect(addr);
+ let acceptor = listener.accept();
+
+ let (c1, c2) = tokio::join!(connector, acceptor);
+
+ (c1.unwrap(), c2.unwrap().0)
+}
+
+async fn block_write(s: &mut TcpStream) -> usize {
+ static BUF: [u8; 2048] = [0; 2048];
+
+ let mut copied = 0;
+ loop {
+ tokio::select! {
+ result = s.write(&BUF) => {
+ copied += result.expect("write error")
+ },
+ _ = tokio::time::sleep(Duration::from_millis(100)) => {
+ break;
+ }
+ }
+ }
+
+ copied
+}
+
+async fn symmetric<F, Fut>(mut cb: F)
+where
+ F: FnMut(JoinHandle<io::Result<(u64, u64)>>, TcpStream, TcpStream) -> Fut,
+ Fut: std::future::Future<Output = ()>,
+{
+ // We run the test twice, with streams passed to copy_bidirectional in
+ // different orders, in order to ensure that the two arguments are
+ // interchangable.
+
+ let (a, mut a1) = make_socketpair().await;
+ let (b, mut b1) = make_socketpair().await;
+
+ let handle = tokio::spawn(async move { copy_bidirectional(&mut a1, &mut b1).await });
+ cb(handle, a, b).await;
+
+ let (a, mut a1) = make_socketpair().await;
+ let (b, mut b1) = make_socketpair().await;
+
+ let handle = tokio::spawn(async move { copy_bidirectional(&mut b1, &mut a1).await });
+
+ cb(handle, b, a).await;
+}
+
+#[tokio::test]
+async fn test_basic_transfer() {
+ symmetric(|_handle, mut a, mut b| async move {
+ a.write_all(b"test").await.unwrap();
+ let mut tmp = [0; 4];
+ b.read_exact(&mut tmp).await.unwrap();
+ assert_eq!(&tmp[..], b"test");
+ })
+ .await
+}
+
+#[tokio::test]
+async fn test_transfer_after_close() {
+ symmetric(|handle, mut a, mut b| async move {
+ AsyncWriteExt::shutdown(&mut a).await.unwrap();
+ b.read_to_end(&mut Vec::new()).await.unwrap();
+
+ b.write_all(b"quux").await.unwrap();
+ let mut tmp = [0; 4];
+ a.read_exact(&mut tmp).await.unwrap();
+ assert_eq!(&tmp[..], b"quux");
+
+ // Once both are closed, we should have our handle back
+ drop(b);
+
+ assert_eq!(handle.await.unwrap().unwrap(), (0, 4));
+ })
+ .await
+}
+
+#[tokio::test]
+async fn blocking_one_side_does_not_block_other() {
+ symmetric(|handle, mut a, mut b| async move {
+ block_write(&mut a).await;
+
+ b.write_all(b"quux").await.unwrap();
+ let mut tmp = [0; 4];
+ a.read_exact(&mut tmp).await.unwrap();
+ assert_eq!(&tmp[..], b"quux");
+
+ AsyncWriteExt::shutdown(&mut a).await.unwrap();
+
+ let mut buf = Vec::new();
+ b.read_to_end(&mut buf).await.unwrap();
+
+ drop(b);
+
+ assert_eq!(handle.await.unwrap().unwrap(), (buf.len() as u64, 4));
+ })
+ .await
+}
+
+#[tokio::test]
+async fn immediate_exit_on_error() {
+ symmetric(|handle, mut a, mut b| async move {
+ block_write(&mut a).await;
+
+ // Fill up the b->copy->a path. We expect that this will _not_ drain
+ // before we exit the copy task.
+ let _bytes_written = block_write(&mut b).await;
+
+ // Drop b. We should not wait for a to consume the data buffered in the
+ // copy loop, since b will be failing writes.
+ drop(b);
+ assert!(handle.await.unwrap().is_err());
+ })
+ .await
+}
diff --git a/tests/macros_select.rs b/tests/macros_select.rs
index 3359849..ea06d51 100644
--- a/tests/macros_select.rs
+++ b/tests/macros_select.rs
@@ -481,3 +481,62 @@ async fn mut_on_left_hand_side() {
.await;
assert_eq!(v, 2);
}
+
+#[tokio::test]
+async fn biased_one_not_ready() {
+ let (_tx1, rx1) = oneshot::channel::<i32>();
+ let (tx2, rx2) = oneshot::channel::<i32>();
+ let (tx3, rx3) = oneshot::channel::<i32>();
+
+ tx2.send(2).unwrap();
+ tx3.send(3).unwrap();
+
+ let v = tokio::select! {
+ biased;
+
+ _ = rx1 => unreachable!(),
+ res = rx2 => {
+ assert_ok!(res)
+ },
+ _ = rx3 => {
+ panic!("This branch should never be activated because `rx2` should be polled before `rx3` due to `biased;`.")
+ }
+ };
+
+ assert_eq!(2, v);
+}
+
+#[tokio::test]
+async fn biased_eventually_ready() {
+ use tokio::task::yield_now;
+
+ let one = async {};
+ let two = async { yield_now().await };
+ let three = async { yield_now().await };
+
+ let mut count = 0u8;
+
+ tokio::pin!(one, two, three);
+
+ loop {
+ tokio::select! {
+ biased;
+
+ _ = &mut two, if count < 2 => {
+ count += 1;
+ assert_eq!(count, 2);
+ }
+ _ = &mut three, if count < 3 => {
+ count += 1;
+ assert_eq!(count, 3);
+ }
+ _ = &mut one, if count < 1 => {
+ count += 1;
+ assert_eq!(count, 1);
+ }
+ else => break,
+ }
+ }
+
+ assert_eq!(count, 3);
+}
diff --git a/tests/rt_common.rs b/tests/rt_common.rs
index 9aef4b9..cb1d0f6 100644
--- a/tests/rt_common.rs
+++ b/tests/rt_common.rs
@@ -1017,6 +1017,32 @@ rt_test! {
});
}
+ #[test]
+ fn coop_unconstrained() {
+ use std::task::Poll::Ready;
+
+ let rt = rt();
+
+ rt.block_on(async {
+ // Create a bunch of tasks
+ let mut tasks = (0..1_000).map(|_| {
+ tokio::spawn(async { })
+ }).collect::<Vec<_>>();
+
+ // Hope that all the tasks complete...
+ time::sleep(Duration::from_millis(100)).await;
+
+ tokio::task::unconstrained(poll_fn(|cx| {
+ // All the tasks should be ready
+ for task in &mut tasks {
+ assert!(Pin::new(task).poll(cx).is_ready());
+ }
+
+ Ready(())
+ })).await;
+ });
+ }
+
// Tests that the "next task" scheduler optimization is not able to starve
// other tasks.
#[test]
diff --git a/tests/rt_handle_block_on.rs b/tests/rt_handle_block_on.rs
new file mode 100644
index 0000000..5234258
--- /dev/null
+++ b/tests/rt_handle_block_on.rs
@@ -0,0 +1,511 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+// All io tests that deal with shutdown is currently ignored because there are known bugs in with
+// shutting down the io driver while concurrently registering new resources. See
+// https://github.com/tokio-rs/tokio/pull/3569#pullrequestreview-612703467 fo more details.
+//
+// When this has been fixed we want to re-enable these tests.
+
+use std::time::Duration;
+use tokio::runtime::{Handle, Runtime};
+use tokio::sync::mpsc;
+use tokio::task::spawn_blocking;
+use tokio::{fs, net, time};
+
+macro_rules! multi_threaded_rt_test {
+ ($($t:tt)*) => {
+ mod threaded_scheduler_4_threads_only {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(4)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_1_thread_only {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(1)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+ }
+}
+
+macro_rules! rt_test {
+ ($($t:tt)*) => {
+ mod current_thread_scheduler {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_4_threads {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(4)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+
+ mod threaded_scheduler_1_thread {
+ use super::*;
+
+ $($t)*
+
+ fn rt() -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(1)
+ .enable_all()
+ .build()
+ .unwrap()
+ }
+ }
+ }
+}
+
+// ==== runtime independent futures ======
+
+#[test]
+fn basic() {
+ test_with_runtimes(|| {
+ let one = Handle::current().block_on(async { 1 });
+ assert_eq!(1, one);
+ });
+}
+
+#[test]
+fn bounded_mpsc_channel() {
+ test_with_runtimes(|| {
+ let (tx, mut rx) = mpsc::channel(1024);
+
+ Handle::current().block_on(tx.send(42)).unwrap();
+
+ let value = Handle::current().block_on(rx.recv()).unwrap();
+ assert_eq!(value, 42);
+ });
+}
+
+#[test]
+fn unbounded_mpsc_channel() {
+ test_with_runtimes(|| {
+ let (tx, mut rx) = mpsc::unbounded_channel();
+
+ let _ = tx.send(42);
+
+ let value = Handle::current().block_on(rx.recv()).unwrap();
+ assert_eq!(value, 42);
+ })
+}
+
+rt_test! {
+ // ==== spawn blocking futures ======
+
+ #[test]
+ fn basic_fs() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let contents = Handle::current()
+ .block_on(fs::read_to_string("Cargo.toml"))
+ .unwrap();
+ assert!(contents.contains("Cargo.toml"));
+ }
+
+ #[test]
+ fn fs_shutdown_before_started() {
+ let rt = rt();
+ let _enter = rt.enter();
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err: std::io::Error = Handle::current()
+ .block_on(fs::read_to_string("Cargo.toml"))
+ .unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+
+ let inner_err = err.get_ref().expect("no inner error");
+ assert_eq!(inner_err.to_string(), "background task failed");
+ }
+
+ #[test]
+ fn basic_spawn_blocking() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let answer = Handle::current()
+ .block_on(spawn_blocking(|| {
+ std::thread::sleep(Duration::from_millis(100));
+ 42
+ }))
+ .unwrap();
+
+ assert_eq!(answer, 42);
+ }
+
+ #[test]
+ fn spawn_blocking_after_shutdown_fails() {
+ let rt = rt();
+ let _enter = rt.enter();
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let join_err = Handle::current()
+ .block_on(spawn_blocking(|| {
+ std::thread::sleep(Duration::from_millis(100));
+ 42
+ }))
+ .unwrap_err();
+
+ assert!(join_err.is_cancelled());
+ }
+
+ #[test]
+ fn spawn_blocking_started_before_shutdown_continues() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let handle = spawn_blocking(|| {
+ std::thread::sleep(Duration::from_secs(1));
+ 42
+ });
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let answer = Handle::current().block_on(handle).unwrap();
+
+ assert_eq!(answer, 42);
+ }
+
+ // ==== net ======
+
+ #[test]
+ fn tcp_listener_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ Handle::current()
+ .block_on(net::TcpListener::bind("127.0.0.1:0"))
+ .unwrap();
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn tcp_listener_connect_after_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current()
+ .block_on(net::TcpListener::bind("127.0.0.1:0"))
+ .unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn tcp_listener_connect_before_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let bind_future = net::TcpListener::bind("127.0.0.1:0");
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current().block_on(bind_future).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ #[test]
+ fn udp_socket_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ Handle::current()
+ .block_on(net::UdpSocket::bind("127.0.0.1:0"))
+ .unwrap();
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn udp_stream_bind_after_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current()
+ .block_on(net::UdpSocket::bind("127.0.0.1:0"))
+ .unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[test]
+ fn udp_stream_bind_before_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let bind_future = net::UdpSocket::bind("127.0.0.1:0");
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = Handle::current().block_on(bind_future).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_bind_after_shutdown() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ let err = net::UnixListener::bind(path).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(
+ err.get_ref().unwrap().to_string(),
+ "A Tokio 1.x context was found, but it is being shutdown.",
+ );
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_shutdown_after_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ let listener = net::UnixListener::bind(path).unwrap();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ // this should not timeout but fail immediately since the runtime has been shutdown
+ let err = Handle::current().block_on(listener.accept()).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(err.get_ref().unwrap().to_string(), "reactor gone");
+ }
+
+ // All io tests are ignored for now. See above why that is.
+ #[ignore]
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_shutdown_after_accept() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ let listener = net::UnixListener::bind(path).unwrap();
+
+ let accept_future = listener.accept();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ // this should not timeout but fail immediately since the runtime has been shutdown
+ let err = Handle::current().block_on(accept_future).unwrap_err();
+
+ assert_eq!(err.kind(), std::io::ErrorKind::Other);
+ assert_eq!(err.get_ref().unwrap().to_string(), "reactor gone");
+ }
+
+ // ==== nesting ======
+
+ #[test]
+ #[should_panic(
+ expected = "Cannot start a runtime from within a runtime. This happens because a function (like `block_on`) attempted to block the current thread while the thread is being used to drive asynchronous tasks."
+ )]
+ fn nesting() {
+ fn some_non_async_function() -> i32 {
+ Handle::current().block_on(time::sleep(Duration::from_millis(10)));
+ 1
+ }
+
+ let rt = rt();
+
+ rt.block_on(async { some_non_async_function() });
+ }
+}
+
+multi_threaded_rt_test! {
+ #[cfg(unix)]
+ #[test]
+ fn unix_listener_bind() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let dir = tempfile::tempdir().unwrap();
+ let path = dir.path().join("socket");
+
+ let listener = net::UnixListener::bind(path).unwrap();
+
+ // this should timeout and not fail immediately since the runtime has not been shutdown
+ let _: tokio::time::error::Elapsed = Handle::current()
+ .block_on(tokio::time::timeout(
+ Duration::from_millis(10),
+ listener.accept(),
+ ))
+ .unwrap_err();
+ }
+
+ // ==== timers ======
+
+ // `Handle::block_on` doesn't work with timer futures on a current thread runtime as there is no
+ // one to drive the timers so they will just hang forever. Therefore they are not tested.
+
+ #[test]
+ fn sleep() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ Handle::current().block_on(time::sleep(Duration::from_millis(100)));
+ }
+
+ #[test]
+ #[should_panic(expected = "A Tokio 1.x context was found, but it is being shutdown.")]
+ fn sleep_before_shutdown_panics() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ let f = time::sleep(Duration::from_millis(100));
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ Handle::current().block_on(f);
+ }
+
+ #[test]
+ #[should_panic(expected = "A Tokio 1.x context was found, but it is being shutdown.")]
+ fn sleep_after_shutdown_panics() {
+ let rt = rt();
+ let _enter = rt.enter();
+
+ rt.shutdown_timeout(Duration::from_secs(1000));
+
+ Handle::current().block_on(time::sleep(Duration::from_millis(100)));
+ }
+}
+
+// ==== utils ======
+
+/// Create a new multi threaded runtime
+fn new_multi_thread(n: usize) -> Runtime {
+ tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(n)
+ .enable_all()
+ .build()
+ .unwrap()
+}
+
+/// Create a new single threaded runtime
+fn new_current_thread() -> Runtime {
+ tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .build()
+ .unwrap()
+}
+
+/// Utility to test things on both kinds of runtimes both before and after shutting it down.
+fn test_with_runtimes<F>(f: F)
+where
+ F: Fn(),
+{
+ {
+ println!("current thread runtime");
+
+ let rt = new_current_thread();
+ let _enter = rt.enter();
+ f();
+
+ println!("current thread runtime after shutdown");
+ rt.shutdown_timeout(Duration::from_secs(1000));
+ f();
+ }
+
+ {
+ println!("multi thread (1 thread) runtime");
+
+ let rt = new_multi_thread(1);
+ let _enter = rt.enter();
+ f();
+
+ println!("multi thread runtime after shutdown");
+ rt.shutdown_timeout(Duration::from_secs(1000));
+ f();
+ }
+
+ {
+ println!("multi thread (4 threads) runtime");
+
+ let rt = new_multi_thread(4);
+ let _enter = rt.enter();
+ f();
+
+ println!("multi thread runtime after shutdown");
+ rt.shutdown_timeout(Duration::from_secs(1000));
+ f();
+ }
+}
diff --git a/tests/sync_notify.rs b/tests/sync_notify.rs
index 8ffe020..6c6620b 100644
--- a/tests/sync_notify.rs
+++ b/tests/sync_notify.rs
@@ -134,3 +134,20 @@ fn notify_in_drop_after_wake() {
// Now, notifying **should not** deadlock
notify.notify_waiters();
}
+
+#[test]
+fn notify_one_after_dropped_all() {
+ let notify = Notify::new();
+ let mut notified1 = spawn(async { notify.notified().await });
+
+ assert_pending!(notified1.poll());
+
+ notify.notify_waiters();
+ notify.notify_one();
+
+ drop(notified1);
+
+ let mut notified2 = spawn(async { notify.notified().await });
+
+ assert_ready!(notified2.poll());
+}
diff --git a/tests/sync_once_cell.rs b/tests/sync_once_cell.rs
new file mode 100644
index 0000000..60f50d2
--- /dev/null
+++ b/tests/sync_once_cell.rs
@@ -0,0 +1,268 @@
+#![warn(rust_2018_idioms)]
+#![cfg(feature = "full")]
+
+use std::mem;
+use std::ops::Drop;
+use std::sync::atomic::{AtomicU32, Ordering};
+use std::time::Duration;
+use tokio::runtime;
+use tokio::sync::{OnceCell, SetError};
+use tokio::time;
+
+async fn func1() -> u32 {
+ 5
+}
+
+async fn func2() -> u32 {
+ time::sleep(Duration::from_millis(1)).await;
+ 10
+}
+
+async fn func_err() -> Result<u32, ()> {
+ Err(())
+}
+
+async fn func_ok() -> Result<u32, ()> {
+ Ok(10)
+}
+
+async fn func_panic() -> u32 {
+ time::sleep(Duration::from_millis(1)).await;
+ panic!();
+}
+
+async fn sleep_and_set() -> u32 {
+ // Simulate sleep by pausing time and waiting for another thread to
+ // resume clock when calling `set`, then finding the cell being initialized
+ // by this call
+ time::sleep(Duration::from_millis(2)).await;
+ 5
+}
+
+async fn advance_time_and_set(cell: &'static OnceCell<u32>, v: u32) -> Result<(), SetError<u32>> {
+ time::advance(Duration::from_millis(1)).await;
+ cell.set(v)
+}
+
+#[test]
+fn get_or_init() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .start_paused(true)
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await });
+ let handle2 = rt.spawn(async { ONCE.get_or_init(func2).await });
+
+ time::advance(Duration::from_millis(1)).await;
+ time::resume();
+
+ let result1 = handle1.await.unwrap();
+ let result2 = handle2.await.unwrap();
+
+ assert_eq!(*result1, 5);
+ assert_eq!(*result2, 5);
+ });
+}
+
+#[test]
+fn get_or_init_panic() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ time::pause();
+
+ let handle1 = rt.spawn(async { ONCE.get_or_init(func1).await });
+ let handle2 = rt.spawn(async { ONCE.get_or_init(func_panic).await });
+
+ time::advance(Duration::from_millis(1)).await;
+
+ let result1 = handle1.await.unwrap();
+ let result2 = handle2.await.unwrap();
+
+ assert_eq!(*result1, 5);
+ assert_eq!(*result2, 5);
+ });
+}
+
+#[test]
+fn set_and_get() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ let _ = rt.spawn(async { ONCE.set(5) }).await;
+ let value = ONCE.get().unwrap();
+ assert_eq!(*value, 5);
+ });
+}
+
+#[test]
+fn get_uninit() {
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+ let uninit = ONCE.get();
+ assert!(uninit.is_none());
+}
+
+#[test]
+fn set_twice() {
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ let first = ONCE.set(5);
+ assert_eq!(first, Ok(()));
+ let second = ONCE.set(6);
+ assert!(second.err().unwrap().is_already_init_err());
+}
+
+#[test]
+fn set_while_initializing() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ time::pause();
+
+ let handle1 = rt.spawn(async { ONCE.get_or_init(sleep_and_set).await });
+ let handle2 = rt.spawn(async { advance_time_and_set(&ONCE, 10).await });
+
+ time::advance(Duration::from_millis(2)).await;
+
+ let result1 = handle1.await.unwrap();
+ let result2 = handle2.await.unwrap();
+
+ assert_eq!(*result1, 5);
+ assert!(result2.err().unwrap().is_initializing_err());
+ });
+}
+
+#[test]
+fn get_or_try_init() {
+ let rt = runtime::Builder::new_current_thread()
+ .enable_time()
+ .start_paused(true)
+ .build()
+ .unwrap();
+
+ static ONCE: OnceCell<u32> = OnceCell::const_new();
+
+ rt.block_on(async {
+ let handle1 = rt.spawn(async { ONCE.get_or_try_init(func_err).await });
+ let handle2 = rt.spawn(async { ONCE.get_or_try_init(func_ok).await });
+
+ time::advance(Duration::from_millis(1)).await;
+ time::resume();
+
+ let result1 = handle1.await.unwrap();
+ assert!(result1.is_err());
+
+ let result2 = handle2.await.unwrap();
+ assert_eq!(*result2.unwrap(), 10);
+ });
+}
+
+#[test]
+fn drop_cell() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ {
+ let once_cell = OnceCell::new();
+ let prev = once_cell.set(fooer);
+ assert!(prev.is_ok())
+ }
+ assert!(NUM_DROPS.load(Ordering::Acquire) == 1);
+}
+
+#[test]
+fn drop_cell_new_with() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ {
+ let once_cell = OnceCell::new_with(Some(fooer));
+ assert!(once_cell.initialized());
+ }
+ assert!(NUM_DROPS.load(Ordering::Acquire) == 1);
+}
+
+#[test]
+fn drop_into_inner() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ let once_cell = OnceCell::new();
+ assert!(once_cell.set(fooer).is_ok());
+ let fooer = once_cell.into_inner();
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 0);
+ drop(fooer);
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 1);
+}
+
+#[test]
+fn drop_into_inner_new_with() {
+ static NUM_DROPS: AtomicU32 = AtomicU32::new(0);
+
+ struct Foo {}
+
+ let fooer = Foo {};
+
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ NUM_DROPS.fetch_add(1, Ordering::Release);
+ }
+ }
+
+ let once_cell = OnceCell::new_with(Some(fooer));
+ let fooer = once_cell.into_inner();
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 0);
+ mem::drop(fooer);
+ let count = NUM_DROPS.load(Ordering::Acquire);
+ assert!(count == 1);
+}
diff --git a/tests/sync_oneshot.rs b/tests/sync_oneshot.rs
index 195c255..1aab810 100644
--- a/tests/sync_oneshot.rs
+++ b/tests/sync_oneshot.rs
@@ -2,6 +2,7 @@
#![cfg(feature = "full")]
use tokio::sync::oneshot;
+use tokio::sync::oneshot::error::TryRecvError;
use tokio_test::*;
use std::future::Future;
@@ -181,6 +182,27 @@ fn close_try_recv_poll() {
}
#[test]
+fn close_after_recv() {
+ let (tx, mut rx) = oneshot::channel::<i32>();
+
+ tx.send(17).unwrap();
+
+ assert_eq!(17, rx.try_recv().unwrap());
+ rx.close();
+}
+
+#[test]
+fn try_recv_after_completion() {
+ let (tx, mut rx) = oneshot::channel::<i32>();
+
+ tx.send(17).unwrap();
+
+ assert_eq!(17, rx.try_recv().unwrap());
+ assert_eq!(Err(TryRecvError::Closed), rx.try_recv());
+ rx.close();
+}
+
+#[test]
fn drops_tasks() {
let (mut tx, mut rx) = oneshot::channel::<i32>();
let mut tx_task = task::spawn(());
diff --git a/tests/sync_rwlock.rs b/tests/sync_rwlock.rs
index 872b845..e12052b 100644
--- a/tests/sync_rwlock.rs
+++ b/tests/sync_rwlock.rs
@@ -54,7 +54,7 @@ fn read_exclusive_pending() {
// should be made available when one of the shared acesses is dropped
#[test]
fn exhaust_reading() {
- let rwlock = RwLock::new(100);
+ let rwlock = RwLock::with_max_readers(100, 1024);
let mut reads = Vec::new();
loop {
let mut t = spawn(rwlock.read());
diff --git a/tests/sync_semaphore_owned.rs b/tests/sync_semaphore_owned.rs
index 8ed6209..478c3a3 100644
--- a/tests/sync_semaphore_owned.rs
+++ b/tests/sync_semaphore_owned.rs
@@ -16,6 +16,22 @@ fn try_acquire() {
assert!(p3.is_ok());
}
+#[test]
+fn try_acquire_many() {
+ let sem = Arc::new(Semaphore::new(42));
+ {
+ let p1 = sem.clone().try_acquire_many_owned(42);
+ assert!(p1.is_ok());
+ let p2 = sem.clone().try_acquire_owned();
+ assert!(p2.is_err());
+ }
+ let p3 = sem.clone().try_acquire_many_owned(32);
+ assert!(p3.is_ok());
+ let p4 = sem.clone().try_acquire_many_owned(10);
+ assert!(p4.is_ok());
+ assert!(sem.try_acquire_owned().is_err());
+}
+
#[tokio::test]
async fn acquire() {
let sem = Arc::new(Semaphore::new(1));
@@ -29,6 +45,21 @@ async fn acquire() {
}
#[tokio::test]
+async fn acquire_many() {
+ let semaphore = Arc::new(Semaphore::new(42));
+ let permit32 = semaphore.clone().try_acquire_many_owned(32).unwrap();
+ let (sender, receiver) = tokio::sync::oneshot::channel();
+ let join_handle = tokio::spawn(async move {
+ let _permit10 = semaphore.clone().acquire_many_owned(10).await.unwrap();
+ sender.send(()).unwrap();
+ let _permit32 = semaphore.acquire_many_owned(32).await.unwrap();
+ });
+ receiver.await.unwrap();
+ drop(permit32);
+ join_handle.await.unwrap();
+}
+
+#[tokio::test]
async fn add_permits() {
let sem = Arc::new(Semaphore::new(0));
let sem_clone = sem.clone();
diff --git a/tests/task_abort.rs b/tests/task_abort.rs
index e84f19c..1d72ac3 100644
--- a/tests/task_abort.rs
+++ b/tests/task_abort.rs
@@ -24,3 +24,70 @@ fn test_abort_without_panic_3157() {
let _ = handle.await;
});
}
+
+/// Checks that a suspended task can be aborted inside of a current_thread
+/// executor without panicking as reported in issue #3662:
+/// <https://github.com/tokio-rs/tokio/issues/3662>.
+#[test]
+fn test_abort_without_panic_3662() {
+ use std::sync::atomic::{AtomicBool, Ordering};
+ use std::sync::Arc;
+
+ struct DropCheck(Arc<AtomicBool>);
+
+ impl Drop for DropCheck {
+ fn drop(&mut self) {
+ self.0.store(true, Ordering::SeqCst);
+ }
+ }
+
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .build()
+ .unwrap();
+
+ rt.block_on(async move {
+ let drop_flag = Arc::new(AtomicBool::new(false));
+ let drop_check = DropCheck(drop_flag.clone());
+
+ let j = tokio::spawn(async move {
+ // NB: just grab the drop check here so that it becomes part of the
+ // task.
+ let _drop_check = drop_check;
+ futures::future::pending::<()>().await;
+ });
+
+ let drop_flag2 = drop_flag.clone();
+
+ let task = std::thread::spawn(move || {
+ // This runs in a separate thread so it doesn't have immediate
+ // thread-local access to the executor. It does however transition
+ // the underlying task to be completed, which will cause it to be
+ // dropped (in this thread no less).
+ assert!(!drop_flag2.load(Ordering::SeqCst));
+ j.abort();
+ // TODO: is this guaranteed at this point?
+ // assert!(drop_flag2.load(Ordering::SeqCst));
+ j
+ })
+ .join()
+ .unwrap();
+
+ assert!(drop_flag.load(Ordering::SeqCst));
+ let result = task.await;
+ assert!(result.unwrap_err().is_cancelled());
+
+ // Note: We do the following to trigger a deferred task cleanup.
+ //
+ // The relevant piece of code you want to look at is in:
+ // `Inner::block_on` of `basic_scheduler.rs`.
+ //
+ // We cause the cleanup to happen by having a poll return Pending once
+ // so that the scheduler can go into the "auxilliary tasks" mode, at
+ // which point the task is removed from the scheduler.
+ let i = tokio::spawn(async move {
+ tokio::task::yield_now().await;
+ });
+
+ i.await.unwrap();
+ });
+}
diff --git a/tests/time_timeout.rs b/tests/time_timeout.rs
index 4efcd8c..dbd80eb 100644
--- a/tests/time_timeout.rs
+++ b/tests/time_timeout.rs
@@ -75,6 +75,33 @@ async fn future_and_timeout_in_future() {
}
#[tokio::test]
+async fn very_large_timeout() {
+ time::pause();
+
+ // Not yet complete
+ let (tx, rx) = oneshot::channel();
+
+ // copy-paste unstable `Duration::MAX`
+ let duration_max = Duration::from_secs(u64::MAX) + Duration::from_nanos(999_999_999);
+
+ // Wrap it with a deadline
+ let mut fut = task::spawn(timeout(duration_max, rx));
+
+ // Ready!
+ assert_pending!(fut.poll());
+
+ // Turn the timer, it runs for the elapsed time
+ time::advance(Duration::from_secs(86400 * 365 * 10)).await;
+
+ assert_pending!(fut.poll());
+
+ // Complete the future
+ tx.send(()).unwrap();
+
+ assert_ready_ok!(fut.poll()).unwrap();
+}
+
+#[tokio::test]
async fn deadline_now_elapses() {
use futures::future::pending;