aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Yu <yumike@google.com>2022-06-08 13:18:31 +0000
committerMike Yu <yumike@google.com>2022-06-27 05:45:29 +0000
commit9551ff13c24fbd2413d110ba81aafe92253808d2 (patch)
tree45188d10e6a02faecdcf5aec929a733053811c6a
parentb5c5a71cb879aeb9d41b009d035373489219ee32 (diff)
downloadDnsResolver-9551ff13c24fbd2413d110ba81aafe92253808d2.tar.gz
DoH: Support Early Data
Early Data can be enabled by the flag `doh_early_data`. Because the value of the flag is cached in the DoH client, any networks that already exist before a new value is set for the flag will keep using the old flag value until the private DNS setting changes. Bug: 235763732 Test: atest Test: atest doh_ffi_test Test: manual test on wifi. 1. adb shell tc qdisc add dev wlan0 root netem delay 500ms 2. Observed the first DNS query latency on a subsequent connection: - when doh_early_data=0/doh_session_resumption=0, it took ~1500 ms - when doh_early_data=1/doh_session_resumption=1, it took ~500 ms Change-Id: I06bc5f9aa006e357dc3ecf04a693126a55782e87
-rw-r--r--Experiments.h1
-rw-r--r--PrivateDnsConfiguration.cpp4
-rw-r--r--doh.h1
-rw-r--r--doh/config.rs49
-rw-r--r--doh/connection/driver.rs33
-rw-r--r--doh/connection/mod.rs2
-rw-r--r--doh/dispatcher/driver.rs1
-rw-r--r--doh/dispatcher/mod.rs4
-rw-r--r--doh/ffi.rs3
-rw-r--r--doh/network/mod.rs1
-rw-r--r--tests/doh/include/lib.rs.h14
-rw-r--r--tests/doh/src/client.rs14
-rw-r--r--tests/doh/src/dns_https_frontend.rs2
-rw-r--r--tests/doh/src/ffi.rs1
-rw-r--r--tests/doh/src/stats.rs2
-rw-r--r--tests/doh_ffi_test.cpp1
-rw-r--r--tests/doh_frontend.cpp9
-rw-r--r--tests/doh_frontend.h3
-rw-r--r--tests/resolv_private_dns_test.cpp39
19 files changed, 147 insertions, 37 deletions
diff --git a/Experiments.h b/Experiments.h
index 209a208e..fde8a481 100644
--- a/Experiments.h
+++ b/Experiments.h
@@ -63,6 +63,7 @@ class Experiments {
"dot_validation_latency_factor",
"dot_validation_latency_offset_ms",
"doh",
+ "doh_early_data",
"doh_query_timeout_ms",
"doh_probe_timeout_ms",
"doh_idle_timeout_ms",
diff --git a/PrivateDnsConfiguration.cpp b/PrivateDnsConfiguration.cpp
index 340a75e0..088ff87a 100644
--- a/PrivateDnsConfiguration.cpp
+++ b/PrivateDnsConfiguration.cpp
@@ -510,10 +510,12 @@ int PrivateDnsConfiguration::setDoh(int32_t netId, uint32_t mark,
getTimeoutFromFlag("doh_idle_timeout_ms", kDohIdleDefaultTimeoutMs),
.use_session_resumption =
Experiments::getInstance()->getFlag("doh_session_resumption", 0) == 1,
+ .enable_early_data = Experiments::getInstance()->getFlag("doh_early_data", 0) == 1,
};
LOG(DEBUG) << __func__ << ": probe_timeout_ms=" << flags.probe_timeout_ms
<< ", idle_timeout_ms=" << flags.idle_timeout_ms
- << ", use_session_resumption=" << flags.use_session_resumption;
+ << ", use_session_resumption=" << flags.use_session_resumption
+ << ", enable_early_data=" << flags.enable_early_data;
return doh_net_new(mDohDispatcher, netId, dohId.httpsTemplate.c_str(), dohId.host.c_str(),
dohId.ipAddr.c_str(), mark, caCert.c_str(), &flags);
diff --git a/doh.h b/doh.h
index 75ecf10a..4fa8a5d7 100644
--- a/doh.h
+++ b/doh.h
@@ -54,6 +54,7 @@ struct FeatureFlags {
uint64_t probe_timeout_ms;
uint64_t idle_timeout_ms;
bool use_session_resumption;
+ bool enable_early_data;
};
using ValidationCallback = void (*)(uint32_t net_id, bool success, const char* ip_addr,
diff --git a/doh/config.rs b/doh/config.rs
index 1f91a151..bcc21184 100644
--- a/doh/config.rs
+++ b/doh/config.rs
@@ -63,6 +63,9 @@ impl Config {
}
None => config.verify_peer(false),
}
+ if key.enable_early_data {
+ config.enable_early_data();
+ }
// Some of these configs are necessary, or the server can't respond the HTTP/3 request.
config.set_max_idle_timeout(key.max_idle_timeout);
@@ -126,6 +129,7 @@ pub struct Cache {
pub struct Key {
pub cert_path: Option<String>,
pub max_idle_timeout: u64,
+ pub enable_early_data: bool,
}
impl Cache {
@@ -174,13 +178,15 @@ impl Cache {
#[test]
fn create_quiche_config() {
assert!(
- Config::from_key(&Key { cert_path: None, max_idle_timeout: 1000 }).is_ok(),
+ Config::from_key(&Key { cert_path: None, max_idle_timeout: 1000, enable_early_data: true })
+ .is_ok(),
"quiche config without cert creating failed"
);
assert!(
Config::from_key(&Key {
cert_path: Some("data/local/tmp/".to_string()),
- max_idle_timeout: 1000
+ max_idle_timeout: 1000,
+ enable_early_data: true,
})
.is_ok(),
"quiche config with cert creating failed"
@@ -191,38 +197,53 @@ fn create_quiche_config() {
fn shared_cache() {
let cache_a = Cache::new();
let cache_b = cache_a.clone();
- let config_a = cache_a.get(&Key { cert_path: None, max_idle_timeout: 1000 }).unwrap();
+ let config_a = cache_a
+ .get(&Key { cert_path: None, max_idle_timeout: 1000, enable_early_data: true })
+ .unwrap();
assert_eq!(Arc::strong_count(&config_a.0), 2);
- let _config_b = cache_b.get(&Key { cert_path: None, max_idle_timeout: 1000 }).unwrap();
+ let _config_b = cache_b
+ .get(&Key { cert_path: None, max_idle_timeout: 1000, enable_early_data: true })
+ .unwrap();
assert_eq!(Arc::strong_count(&config_a.0), 3);
}
#[test]
fn different_keys() {
let cache = Cache::new();
- let key_a = Key { cert_path: None, max_idle_timeout: 1000 };
- let key_b = Key { cert_path: Some("a".to_string()), max_idle_timeout: 1000 };
- let key_c = Key { cert_path: Some("a".to_string()), max_idle_timeout: 5000 };
+ let key_a = Key { cert_path: None, max_idle_timeout: 1000, enable_early_data: false };
+ let key_b =
+ Key { cert_path: Some("a".to_string()), max_idle_timeout: 1000, enable_early_data: false };
+ let key_c =
+ Key { cert_path: Some("a".to_string()), max_idle_timeout: 5000, enable_early_data: false };
+ let key_d =
+ Key { cert_path: Some("a".to_string()), max_idle_timeout: 5000, enable_early_data: true };
let config_a = cache.get(&key_a).unwrap();
let config_b = cache.get(&key_b).unwrap();
let _config_b = cache.get(&key_b).unwrap();
let config_c = cache.get(&key_c).unwrap();
let _config_c = cache.get(&key_c).unwrap();
+ let config_d = cache.get(&key_d).unwrap();
+ let _config_d = cache.get(&key_d).unwrap();
assert_eq!(Arc::strong_count(&config_a.0), 1);
assert_eq!(Arc::strong_count(&config_b.0), 2);
+ assert_eq!(Arc::strong_count(&config_c.0), 2);
- // config_c was most recently created, so it should have an extra strong reference due to
+ // config_d was most recently created, so it should have an extra strong reference due to
// keep-alive in the cache.
- assert_eq!(Arc::strong_count(&config_c.0), 3);
+ assert_eq!(Arc::strong_count(&config_d.0), 3);
}
#[test]
fn lifetimes() {
let cache = Cache::new();
- let key_a = Key { cert_path: Some("a".to_string()), max_idle_timeout: 1000 };
- let key_b = Key { cert_path: Some("b".to_string()), max_idle_timeout: 1000 };
- let config_none = cache.get(&Key { cert_path: None, max_idle_timeout: 1000 }).unwrap();
+ let key_a =
+ Key { cert_path: Some("a".to_string()), max_idle_timeout: 1000, enable_early_data: true };
+ let key_b =
+ Key { cert_path: Some("b".to_string()), max_idle_timeout: 1000, enable_early_data: true };
+ let config_none = cache
+ .get(&Key { cert_path: None, max_idle_timeout: 1000, enable_early_data: true })
+ .unwrap();
let config_a = cache.get(&key_a).unwrap();
let config_b = cache.get(&key_b).unwrap();
@@ -268,7 +289,9 @@ fn lifetimes() {
#[tokio::test]
async fn quiche_connect() {
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
- let mut config = Config::from_key(&Key { cert_path: None, max_idle_timeout: 10 }).unwrap();
+ let mut config =
+ Config::from_key(&Key { cert_path: None, max_idle_timeout: 10, enable_early_data: true })
+ .unwrap();
let socket_addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 42));
let conn_id = quiche::ConnectionId::from_ref(&[]);
quiche::connect(None, &conn_id, socket_addr, config.take().await.deref_mut()).unwrap();
diff --git a/doh/connection/driver.rs b/doh/connection/driver.rs
index 1ba8faa7..f452d0a0 100644
--- a/doh/connection/driver.rs
+++ b/doh/connection/driver.rs
@@ -194,6 +194,19 @@ impl Driver {
}
async fn drive_once(mut self) -> Result<Self> {
+ // If the QUIC connection is live, but the HTTP/3 is not, try to bring it up
+ if self.quiche_conn.is_established() || self.quiche_conn.is_in_early_data() {
+ info!(
+ "Connection {} established on network {}",
+ self.quiche_conn.trace_id(),
+ self.net_id
+ );
+ let h3_config = h3::Config::new()?;
+ let h3_conn = h3::Connection::with_transport(&mut self.quiche_conn, &h3_config)?;
+ self = H3Driver::new(self, h3_conn).drive().await?;
+ let _ = self.status_tx.send(Status::QUIC);
+ }
+
let timer = optional_timeout(self.quiche_conn.timeout(), self.net_id);
select! {
// If a quiche timer would fire, call their callback
@@ -210,19 +223,6 @@ impl Driver {
// Any of the actions in the select could require us to send packets to the peer
self.flush_tx().await?;
- // If the QUIC connection is live, but the HTTP/3 is not, try to bring it up
- if self.quiche_conn.is_established() {
- info!(
- "Connection {} established on network {}",
- self.quiche_conn.trace_id(),
- self.net_id
- );
- let h3_config = h3::Config::new()?;
- let h3_conn = h3::Connection::with_transport(&mut self.quiche_conn, &h3_config)?;
- self = H3Driver::new(self, h3_conn).drive().await?;
- let _ = self.status_tx.send(Status::QUIC);
- }
-
// If the connection has entered draining state (the server is closing the connection),
// tell the status watcher not to use the connection. Besides, per Quiche document,
// the connection should not be dropped until is_closed() returns true.
@@ -285,7 +285,8 @@ impl H3Driver {
}
select! {
// Only attempt to enqueue new requests if we have no buffered request and aren't
- // closing
+ // closing. Maybe limit the number of in-flight queries if the handshake
+ // still hasn't finished.
msg = self.driver.request_rx.recv(), if !self.driver.closing && self.buffered_request.is_none() => {
match msg {
Some(request) => self.handle_request(request)?,
@@ -321,8 +322,8 @@ impl H3Driver {
}
fn handle_request(&mut self, request: Request) -> Result<()> {
- info!("Handling DNS request on network {}, stats=[{:?}], peer_streams_left_bidi={}, peer_streams_left_uni={}",
- self.driver.net_id, self.driver.quiche_conn.stats(), self.driver.quiche_conn.peer_streams_left_bidi(), self.driver.quiche_conn.peer_streams_left_uni());
+ info!("Handling DNS request on network {}, is_in_early_data={}, stats=[{:?}], peer_streams_left_bidi={}, peer_streams_left_uni={}",
+ self.driver.net_id, self.driver.quiche_conn.is_in_early_data(), self.driver.quiche_conn.stats(), self.driver.quiche_conn.peer_streams_left_bidi(), self.driver.quiche_conn.peer_streams_left_uni());
// If the request has already timed out, don't issue it to the server.
if let Some(expiry) = request.expiry {
if BootTime::now() > expiry {
diff --git a/doh/connection/mod.rs b/doh/connection/mod.rs
index edd79cf9..8634014d 100644
--- a/doh/connection/mod.rs
+++ b/doh/connection/mod.rs
@@ -142,6 +142,8 @@ impl Connection {
let scid = new_scid();
let mut quiche_conn =
quiche::connect(server_name, &quiche::ConnectionId::from_ref(&scid), to, config)?;
+
+ // We will fall back to a full handshake if the session is expired.
if let Some(session) = session {
debug!("Setting session");
quiche_conn.set_session(&session)?;
diff --git a/doh/dispatcher/driver.rs b/doh/dispatcher/driver.rs
index 5e0359cb..aaea68cc 100644
--- a/doh/dispatcher/driver.rs
+++ b/doh/dispatcher/driver.rs
@@ -117,6 +117,7 @@ impl Driver {
let key = config::Key {
cert_path: info.cert_path.clone(),
max_idle_timeout: info.idle_timeout_ms,
+ enable_early_data: info.enable_early_data,
};
let config = self.config_cache.get(&key)?;
vacant.insert(
diff --git a/doh/dispatcher/mod.rs b/doh/dispatcher/mod.rs
index 18439c8e..6dff3e14 100644
--- a/doh/dispatcher/mod.rs
+++ b/doh/dispatcher/mod.rs
@@ -87,7 +87,9 @@ impl Dispatcher {
.build()?;
let join_handle = runtime.spawn(async {
let result = Driver::new(cmd_receiver, validation, tagger).drive().await;
- if let Err(ref e) = result { error!("Dispatcher driver exited due to {:?}", e) }
+ if let Err(ref e) = result {
+ error!("Dispatcher driver exited due to {:?}", e)
+ }
result
});
Ok(Dispatcher { cmd_sender, join_handle, runtime })
diff --git a/doh/ffi.rs b/doh/ffi.rs
index 65ca012e..6e9f64ad 100644
--- a/doh/ffi.rs
+++ b/doh/ffi.rs
@@ -43,6 +43,7 @@ pub struct FeatureFlags {
probe_timeout_ms: uint64_t,
idle_timeout_ms: uint64_t,
use_session_resumption: bool,
+ enable_early_data: bool,
}
fn wrap_validation_callback(validation_fn: ValidationCallback) -> ValidationReporter {
@@ -233,6 +234,7 @@ pub unsafe extern "C" fn doh_net_new(
cert_path,
idle_timeout_ms: flags.idle_timeout_ms,
use_session_resumption: flags.use_session_resumption,
+ enable_early_data: flags.enable_early_data,
},
timeout: Duration::from_millis(flags.probe_timeout_ms),
};
@@ -384,6 +386,7 @@ mod tests {
cert_path: None,
idle_timeout_ms: 0,
use_session_resumption: true,
+ enable_early_data: true,
};
wrap_validation_callback(success_cb)(&info, true).await;
diff --git a/doh/network/mod.rs b/doh/network/mod.rs
index 19be8643..7e39f60b 100644
--- a/doh/network/mod.rs
+++ b/doh/network/mod.rs
@@ -49,6 +49,7 @@ pub struct ServerInfo {
pub cert_path: Option<String>,
pub idle_timeout_ms: u64,
pub use_session_resumption: bool,
+ pub enable_early_data: bool,
}
#[derive(Debug)]
diff --git a/tests/doh/include/lib.rs.h b/tests/doh/include/lib.rs.h
index 9e259a0d..cd3f19a7 100644
--- a/tests/doh/include/lib.rs.h
+++ b/tests/doh/include/lib.rs.h
@@ -5,7 +5,7 @@
#pragma once
-/* Generated with cbindgen:0.19.0 */
+/* Generated with cbindgen:0.24.2 */
#include <stdint.h>
#include <sys/types.h>
@@ -13,15 +13,17 @@
namespace test {
namespace rust {
-static const uintptr_t DNS_HEADER_SIZE = 12;
+constexpr static const uintptr_t DNS_HEADER_SIZE = 12;
-static const uintptr_t MAX_UDP_PAYLOAD_SIZE = 1350;
+constexpr static const uintptr_t MAX_UDP_PAYLOAD_SIZE = 1350;
+
+constexpr static const uintptr_t CONN_ID_LEN = 8;
/// Default value for max_idle_timeout transport parameter.
-static const uint64_t QUICHE_IDLE_TIMEOUT_MS = 10000;
+constexpr static const uint64_t QUICHE_IDLE_TIMEOUT_MS = 10000;
/// Default value for initial_max_streams_bidi transport parameter.
-static const uint64_t MAX_STREAMS_BIDI = 100;
+constexpr static const uint64_t MAX_STREAMS_BIDI = 100;
/// Frontend object.
struct DohFrontend;
@@ -35,6 +37,8 @@ struct Stats {
uint32_t alive_connections;
/// The number of QUIC connections using session resumption.
uint32_t resumed_connections;
+ /// The number of QUIC connections that received early data.
+ uint32_t early_data_connections;
};
extern "C" {
diff --git a/tests/doh/src/client.rs b/tests/doh/src/client.rs
index 0cfa18e2..8e3d2965 100644
--- a/tests/doh/src/client.rs
+++ b/tests/doh/src/client.rs
@@ -53,6 +53,9 @@ pub struct Client {
/// Queues the second part DNS answers needed to be sent after first part.
/// <Stream ID, ans>
pending_answers: Vec<(u64, Vec<u8>)>,
+
+ /// Returns true if early data is received.
+ handled_early_data: bool,
}
impl Client {
@@ -64,6 +67,7 @@ impl Client {
id,
in_flight_queries: HashMap::new(),
pending_answers: Vec::new(),
+ handled_early_data: false,
}
}
@@ -190,12 +194,16 @@ impl Client {
self.conn.recv(data, recv_info)?;
if (self.conn.is_in_early_data() || self.conn.is_established()) && self.h3_conn.is_none() {
- // Create a HTTP3 connection as soon as the QUIC connection is established.
+ // Create a HTTP3 connection as soon as either the QUIC connection is established or
+ // the handshake has progressed enough to receive early data.
self.create_http3_connection()?;
info!("HTTP/3 connection created");
}
if self.h3_conn.is_some() {
+ if self.conn.is_in_early_data() {
+ self.handled_early_data = true;
+ }
return self.handle_http3_request();
}
@@ -233,6 +241,10 @@ impl Client {
pub fn close(&mut self) {
let _ = self.conn.close(false, 0, b"Graceful shutdown");
}
+
+ pub fn handled_early_data(&self) -> bool {
+ self.handled_early_data
+ }
}
impl std::fmt::Debug for Client {
diff --git a/tests/doh/src/dns_https_frontend.rs b/tests/doh/src/dns_https_frontend.rs
index 121dadaa..7ce74480 100644
--- a/tests/doh/src/dns_https_frontend.rs
+++ b/tests/doh/src/dns_https_frontend.rs
@@ -397,6 +397,7 @@ async fn worker_thread(params: WorkerParams) -> Result<()> {
connections_accepted: clients.len() as u32,
alive_connections: clients.iter().filter(|(_, client)| client.is_alive()).count() as u32,
resumed_connections: clients.iter().filter(|(_, client)| client.is_resumed()).count() as u32,
+ early_data_connections: clients.iter().filter(|(_, client)| client.handled_early_data()).count() as u32,
};
if let Err(e) = resp.send(stats) {
error!("Failed to send ControlCommand::Stats response: {:?}", e);
@@ -452,6 +453,7 @@ fn create_quiche_config(
quiche_config.set_initial_max_streams_bidi(config.lock().unwrap().max_streams_bidi);
quiche_config.set_initial_max_streams_uni(100);
quiche_config.set_disable_active_migration(true);
+ quiche_config.enable_early_data();
Ok(quiche_config)
}
diff --git a/tests/doh/src/ffi.rs b/tests/doh/src/ffi.rs
index 99b04268..11d98502 100644
--- a/tests/doh/src/ffi.rs
+++ b/tests/doh/src/ffi.rs
@@ -162,6 +162,7 @@ pub extern "C" fn frontend_stats(doh: &mut DohFrontend, out: &mut Stats) -> bool
out.connections_accepted = stats.connections_accepted;
out.alive_connections = stats.alive_connections;
out.resumed_connections = stats.resumed_connections;
+ out.early_data_connections = stats.early_data_connections;
})
.or_else(logging_and_return_err)
.is_ok()
diff --git a/tests/doh/src/stats.rs b/tests/doh/src/stats.rs
index f927f2fc..5f71e9e0 100644
--- a/tests/doh/src/stats.rs
+++ b/tests/doh/src/stats.rs
@@ -27,6 +27,8 @@ pub struct Stats {
pub alive_connections: u32,
/// The number of QUIC connections using session resumption.
pub resumed_connections: u32,
+ /// The number of QUIC connections that received early data.
+ pub early_data_connections: u32,
}
impl Stats {
diff --git a/tests/doh_ffi_test.cpp b/tests/doh_ffi_test.cpp
index 61180ce7..5339009d 100644
--- a/tests/doh_ffi_test.cpp
+++ b/tests/doh_ffi_test.cpp
@@ -61,6 +61,7 @@ TEST_F(DoHFFITest, SmokeTest) {
.probe_timeout_ms = TIMEOUT_MS,
.idle_timeout_ms = TIMEOUT_MS,
.use_session_resumption = true,
+ .enable_early_data = true,
};
// TODO: Use a local server instead of dns.google.
diff --git a/tests/doh_frontend.cpp b/tests/doh_frontend.cpp
index 92a04e1b..0f4b48ae 100644
--- a/tests/doh_frontend.cpp
+++ b/tests/doh_frontend.cpp
@@ -102,6 +102,15 @@ int DohFrontend::resumedConnections() const {
return stats.resumed_connections;
}
+int DohFrontend::earlyDataConnections() const {
+ std::lock_guard guard(mMutex);
+ if (!mRustDoh) return 0;
+
+ rust::Stats stats;
+ rust::frontend_stats(mRustDoh, &stats);
+ return stats.early_data_connections;
+}
+
void DohFrontend::clearQueries() {
std::lock_guard guard(mMutex);
if (mRustDoh) {
diff --git a/tests/doh_frontend.h b/tests/doh_frontend.h
index ebd4a034..d1ca71e3 100644
--- a/tests/doh_frontend.h
+++ b/tests/doh_frontend.h
@@ -60,6 +60,9 @@ class DohFrontend {
// Returns the number of connections using session resumption.
int resumedConnections() const;
+ // Returns the number of connections that had early data.
+ int earlyDataConnections() const;
+
void clearQueries();
bool block_sending(bool block);
bool waitForAllClientsDisconnected() const;
diff --git a/tests/resolv_private_dns_test.cpp b/tests/resolv_private_dns_test.cpp
index 6801ad0a..634ea537 100644
--- a/tests/resolv_private_dns_test.cpp
+++ b/tests/resolv_private_dns_test.cpp
@@ -60,6 +60,7 @@ const std::string kDohProbeTimeoutFlag("persist.device_config.netd_native.doh_pr
const std::string kDohIdleTimeoutFlag("persist.device_config.netd_native.doh_idle_timeout_ms");
const std::string kDohSessionResumptionFlag(
"persist.device_config.netd_native.doh_session_resumption");
+const std::string kDohEarlyDataFlag("persist.device_config.netd_native.doh_early_data");
const std::string kDotAsyncHandshakeFlag("persist.device_config.netd_native.dot_async_handshake");
const std::string kDotMaxretriesFlag("persist.device_config.netd_native.dot_maxtries");
@@ -805,6 +806,12 @@ TEST_F(PrivateDnsDohTest, ExcessDnsRequests) {
// connection mpsc channel; the other one that will get blocked at dispatcher sending channel).
const int timeout_queries = 52;
+ // If early data flag is enabled, DnsResolver doesn't wait for the connection established.
+ // It will send DNS queries along with 0-RTT rather than queue them in connection mpsc channel.
+ // So we disable the flag.
+ ScopedSystemProperties sp(kDohEarlyDataFlag, "0");
+ resetNetwork();
+
const int initial_max_idle_timeout_ms = 2000;
ASSERT_TRUE(doh.stopServer());
EXPECT_TRUE(doh.setMaxIdleTimeout(initial_max_idle_timeout_ms));
@@ -1105,6 +1112,38 @@ TEST_F(PrivateDnsDohTest, SessionResumption) {
}
}
+// Tests that the flag "doh_early_data" works as expected.
+TEST_F(PrivateDnsDohTest, TestEarlyDataFlag) {
+ const int initial_max_idle_timeout_ms = 1000;
+ for (const auto& flag : {"0", "1"}) {
+ SCOPED_TRACE(fmt::format("flag: {}", flag));
+ ScopedSystemProperties sp1(kDohSessionResumptionFlag, flag);
+ ScopedSystemProperties sp2(kDohEarlyDataFlag, flag);
+ resetNetwork();
+
+ ASSERT_TRUE(doh.stopServer());
+ EXPECT_TRUE(doh.setMaxIdleTimeout(initial_max_idle_timeout_ms));
+ ASSERT_TRUE(doh.startServer());
+
+ const auto parcel = DnsResponderClient::GetDefaultResolverParamsParcel();
+ ASSERT_TRUE(mDnsClient.SetResolversFromParcel(parcel));
+ EXPECT_TRUE(WaitForDohValidation(test::kDefaultListenAddr, true));
+ EXPECT_TRUE(WaitForDotValidation(test::kDefaultListenAddr, true));
+ EXPECT_TRUE(dot.waitForQueries(1));
+ dot.clearQueries();
+ doh.clearQueries();
+ dns.clearQueries();
+
+ // Wait for the connection closed, and then send a DNS query.
+ // Expect the query to be sent in early data if the flag is on.
+ sleep_for(milliseconds(initial_max_idle_timeout_ms + 500));
+ int fd = resNetworkQuery(TEST_NETID, kQueryHostname, ns_c_in, ns_t_aaaa,
+ ANDROID_RESOLV_NO_CACHE_LOOKUP);
+ expectAnswersValid(fd, AF_INET6, kQueryAnswerAAAA);
+ EXPECT_EQ(doh.earlyDataConnections(), (strcmp(flag, "1") ? 0 : 1));
+ }
+}
+
// Tests that after the connection is closed by the server (known by sending CONNECTION_CLOSE
// frame), the DnsResolver can initiate another new connection for DNS requests.
TEST_F(PrivateDnsDohTest, RemoteConnectionClosed) {