author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-05-17 21:07:57 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-05-17 21:07:57 +0000
commit     391dab55876a8a2cd12babf3dcee5cdd01350c07 (patch)
tree       85aaec5d2742eabb0579278b5dccda73eb6ef3e9
parent     1ec90ab2b40236b6641f754ef73e0ff2761c0aad (diff)
parent     ea372e35525ced3c652535d82aaba7dc638cbd3e (diff)
download   security-android12L-platform-release.tar.gz
Change-Id: I7c01d3b6a4e3401852ee362609e2e7750499452a
-rw-r--r--  keystore2/legacykeystore/lib.rs                                65
-rw-r--r--  keystore2/src/attestation_key_utils.rs                          9
-rw-r--r--  keystore2/src/authorization.rs                                  4
-rw-r--r--  keystore2/src/database.rs                                     304
-rw-r--r--  keystore2/src/globals.rs                                        6
-rw-r--r--  keystore2/src/legacy_blob.rs                                  832
-rw-r--r--  keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs   172
-rw-r--r--  keystore2/src/legacy_importer.rs (renamed from keystore2/src/legacy_migrator.rs)  510
-rw-r--r--  keystore2/src/lib.rs                                            2
-rw-r--r--  keystore2/src/maintenance.rs                                   20
-rw-r--r--  keystore2/src/raw_device.rs                                     7
-rw-r--r--  keystore2/src/remote_provisioning.rs                           20
-rw-r--r--  keystore2/src/security_level.rs                               156
-rw-r--r--  keystore2/src/service.rs                                       27
-rw-r--r--  keystore2/src/super_key.rs                                     82
-rw-r--r--  keystore2/src/utils.rs                                         90
16 files changed, 1816 insertions, 490 deletions
diff --git a/keystore2/legacykeystore/lib.rs b/keystore2/legacykeystore/lib.rs
index efa0870f..c23c29cd 100644
--- a/keystore2/legacykeystore/lib.rs
+++ b/keystore2/legacykeystore/lib.rs
@@ -25,8 +25,9 @@ use android_security_legacykeystore::binder::{
};
use anyhow::{Context, Result};
use keystore2::{
- async_task::AsyncTask, legacy_blob::LegacyBlobLoader, maintenance::DeleteListener,
- maintenance::Domain, utils::watchdog as wd,
+ async_task::AsyncTask, globals::SUPER_KEY, legacy_blob::LegacyBlobLoader,
+ maintenance::DeleteListener, maintenance::Domain, utils::uid_to_android_user,
+ utils::watchdog as wd,
};
use rusqlite::{
params, Connection, OptionalExtension, Transaction, TransactionBehavior, NO_PARAMS,
@@ -312,8 +313,8 @@ impl LegacyKeystore {
if let Some(entry) = db.get(uid, alias).context("In get: Trying to load entry from DB.")? {
return Ok(entry);
}
- if self.get_legacy(uid, alias).context("In get: Trying to migrate legacy blob.")? {
- // If we were able to migrate a legacy blob try again.
+ if self.get_legacy(uid, alias).context("In get: Trying to import legacy blob.")? {
+ // If we were able to import a legacy blob try again.
if let Some(entry) =
db.get(uid, alias).context("In get: Trying to load entry from DB.")?
{
@@ -325,19 +326,20 @@ impl LegacyKeystore {
fn put(&self, alias: &str, uid: i32, entry: &[u8]) -> Result<()> {
let uid = Self::get_effective_uid(uid).context("In put.")?;
- // In order to make sure that we don't have stale legacy entries, make sure they are
- // migrated before replacing them.
- let _ = self.get_legacy(uid, alias);
let mut db = self.open_db().context("In put.")?;
- db.put(uid, alias, entry).context("In put: Trying to insert entry into DB.")
+ db.put(uid, alias, entry).context("In put: Trying to insert entry into DB.")?;
+ // When replacing an entry, make sure that there is no stale legacy file entry.
+ let _ = self.remove_legacy(uid, alias);
+ Ok(())
}
fn remove(&self, alias: &str, uid: i32) -> Result<()> {
let uid = Self::get_effective_uid(uid).context("In remove.")?;
let mut db = self.open_db().context("In remove.")?;
- // In order to make sure that we don't have stale legacy entries, make sure they are
- // migrated before removing them.
- let _ = self.get_legacy(uid, alias);
+
+ if self.remove_legacy(uid, alias).context("In remove: trying to remove legacy entry")? {
+ return Ok(());
+ }
let removed =
db.remove(uid, alias).context("In remove: Trying to remove entry from DB.")?;
if removed {
@@ -427,17 +429,30 @@ impl LegacyKeystore {
return Ok(true);
}
let mut db = DB::new(&state.db_path).context("In open_db: Failed to open db.")?;
- let migrated =
- Self::migrate_one_legacy_entry(uid, &alias, &state.legacy_loader, &mut db)
- .context("Trying to migrate legacy keystore entries.")?;
- if migrated {
+ let imported =
+ Self::import_one_legacy_entry(uid, &alias, &state.legacy_loader, &mut db)
+ .context("Trying to import legacy keystore entries.")?;
+ if imported {
state.recently_imported.insert((uid, alias));
}
- Ok(migrated)
+ Ok(imported)
})
.context("In get_legacy.")
}
+ fn remove_legacy(&self, uid: u32, alias: &str) -> Result<bool> {
+ let alias = alias.to_string();
+ self.do_serialized(move |state| {
+ if state.recently_imported.contains(&(uid, alias.clone())) {
+ return Ok(false);
+ }
+ state
+ .legacy_loader
+ .remove_legacy_keystore_entry(uid, &alias)
+ .context("Trying to remove legacy entry.")
+ })
+ }
+
fn bulk_delete_uid(&self, uid: u32) -> Result<()> {
self.do_serialized(move |state| {
let entries = state
@@ -470,21 +485,29 @@ impl LegacyKeystore {
})
}
- fn migrate_one_legacy_entry(
+ fn import_one_legacy_entry(
uid: u32,
alias: &str,
legacy_loader: &LegacyBlobLoader,
db: &mut DB,
) -> Result<bool> {
let blob = legacy_loader
- .read_legacy_keystore_entry(uid, alias)
- .context("In migrate_one_legacy_entry: Trying to read legacy keystore entry.")?;
+ .read_legacy_keystore_entry(uid, alias, |ciphertext, iv, tag, _salt, _key_size| {
+ if let Some(key) =
+ SUPER_KEY.get_per_boot_key_by_user_id(uid_to_android_user(uid as u32))
+ {
+ key.decrypt(ciphertext, iv, tag)
+ } else {
+ Err(Error::sys()).context("No key found for user. Device may be locked.")
+ }
+ })
+ .context("In import_one_legacy_entry: Trying to read legacy keystore entry.")?;
if let Some(entry) = blob {
db.put(uid, alias, &entry)
- .context("In migrate_one_legacy_entry: Trying to insert entry into DB.")?;
+ .context("In import_one_legacy_entry: Trying to insert entry into DB.")?;
legacy_loader
.remove_legacy_keystore_entry(uid, alias)
- .context("In migrate_one_legacy_entry: Trying to delete legacy keystore entry.")?;
+ .context("In import_one_legacy_entry: Trying to delete legacy keystore entry.")?;
Ok(true)
} else {
Ok(false)
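
The import path above hands read_legacy_keystore_entry a decrypt callback with the signature FnOnce(&[u8], &[u8], &[u8], Option<&[u8]>, Option<usize>) -> Result<ZVec> (ciphertext, iv, tag, salt, key size). A minimal sketch of such a callback, assuming a raw AES-GCM key is at hand and that keystore2_crypto's error type converts through anyhow::Context as it does elsewhere in this patch; the helper example_decrypt and its call site are hypothetical, the production callback uses the per-user super key as shown above:

use anyhow::{Context, Result};
use keystore2_crypto::{aes_gcm_decrypt, ZVec};

// Salt and key size are only supplied for password-encrypted blobs and are not
// needed when decrypting with a raw AES-GCM key, so the callback ignores them.
fn example_decrypt(key: &[u8], ciphertext: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
    aes_gcm_decrypt(ciphertext, iv, tag, key).context("Failed to decrypt legacy entry.")
}

// Hypothetical call site:
// let entry = legacy_loader.read_legacy_keystore_entry(uid, alias, |c, iv, tag, _salt, _key_size| {
//     example_decrypt(key, c, iv, tag)
// })?;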
diff --git a/keystore2/src/attestation_key_utils.rs b/keystore2/src/attestation_key_utils.rs
index ca00539b..b2bc86c2 100644
--- a/keystore2/src/attestation_key_utils.rs
+++ b/keystore2/src/attestation_key_utils.rs
@@ -35,6 +35,7 @@ use keystore2_crypto::parse_subject_from_certificate;
/// handled quite differently, thus the different representations.
pub enum AttestationKeyInfo {
RemoteProvisioned {
+ key_id_guard: KeyIdGuard,
attestation_key: AttestationKey,
attestation_certs: Certificate,
},
@@ -66,8 +67,12 @@ pub fn get_attest_key_info(
"Trying to get remotely provisioned attestation key."
))
.map(|result| {
- result.map(|(attestation_key, attestation_certs)| {
- AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }
+ result.map(|(key_id_guard, attestation_key, attestation_certs)| {
+ AttestationKeyInfo::RemoteProvisioned {
+ key_id_guard,
+ attestation_key,
+ attestation_certs,
+ }
})
}),
None => Ok(None),
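
AttestationKeyInfo::RemoteProvisioned now also carries the KeyIdGuard of the backing key entry. A minimal caller-side sketch; the helper unpack_remote_provisioned is hypothetical, and the guard is simply handed back along with the key material so that the key id stays locked while it is in use:

// Hedged sketch, assuming the imports already used by this module
// (AttestationKey, Certificate, KeyIdGuard, AttestationKeyInfo).
fn unpack_remote_provisioned(
    info: AttestationKeyInfo,
) -> Option<(KeyIdGuard, AttestationKey, Certificate)> {
    match info {
        AttestationKeyInfo::RemoteProvisioned { key_id_guard, attestation_key, attestation_certs } => {
            // Returning the guard together with the key keeps the key id locked at the caller.
            Some((key_id_guard, attestation_key, attestation_certs))
        }
        _ => None,
    }
}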
diff --git a/keystore2/src/authorization.rs b/keystore2/src/authorization.rs
index 777089f4..e2058834 100644
--- a/keystore2/src/authorization.rs
+++ b/keystore2/src/authorization.rs
@@ -15,7 +15,7 @@
//! This module implements IKeystoreAuthorization AIDL interface.
use crate::error::Error as KeystoreError;
-use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_MIGRATOR};
+use crate::globals::{ENFORCEMENTS, SUPER_KEY, DB, LEGACY_IMPORTER};
use crate::permission::KeystorePerm;
use crate::super_key::UserState;
use crate::utils::{check_keystore_permission, watchdog as wd};
@@ -161,7 +161,7 @@ impl AuthorizationManager {
.with(|db| {
UserState::get_with_password_unlock(
&mut db.borrow_mut(),
- &LEGACY_MIGRATOR,
+ &LEGACY_IMPORTER,
&SUPER_KEY,
user_id as u32,
&password,
diff --git a/keystore2/src/database.rs b/keystore2/src/database.rs
index de233289..e1a704c5 100644
--- a/keystore2/src/database.rs
+++ b/keystore2/src/database.rs
@@ -323,6 +323,8 @@ pub static KEYSTORE_UUID: Uuid = Uuid([
0x41, 0xe3, 0xb9, 0xce, 0x27, 0x58, 0x4e, 0x91, 0xbc, 0xfd, 0xa5, 0x5d, 0x91, 0x85, 0xab, 0x11,
]);
+static EXPIRATION_BUFFER_MS: i64 = 20000;
+
/// Indicates how the sensitive part of this key blob is encrypted.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd)]
pub enum EncryptedBy {
@@ -578,6 +580,36 @@ pub struct CertificateInfo {
cert_chain: Option<Vec<u8>>,
}
+/// This type represents a Blob with its metadata and an optional superseded blob.
+#[derive(Debug)]
+pub struct BlobInfo<'a> {
+ blob: &'a [u8],
+ metadata: &'a BlobMetaData,
+ /// Superseded blobs are an artifact of legacy import. On some rare occasions
+ /// the key blob needs to be upgraded during import. In that case two
+ /// blobs are imported; the superseded one has to be imported first,
+ /// so that the garbage collector can reap it.
+ superseded_blob: Option<(&'a [u8], &'a BlobMetaData)>,
+}
+
+impl<'a> BlobInfo<'a> {
+ /// Create a new instance of blob info with blob and corresponding metadata
+ /// and no superseded blob info.
+ pub fn new(blob: &'a [u8], metadata: &'a BlobMetaData) -> Self {
+ Self { blob, metadata, superseded_blob: None }
+ }
+
+ /// Create a new instance of blob info with blob and corresponding metadata
+ /// as well as superseded blob info.
+ pub fn new_with_superseded(
+ blob: &'a [u8],
+ metadata: &'a BlobMetaData,
+ superseded_blob: Option<(&'a [u8], &'a BlobMetaData)>,
+ ) -> Self {
+ Self { blob, metadata, superseded_blob }
+ }
+}
+
impl CertificateInfo {
/// Constructs a new CertificateInfo object from `cert` and `cert_chain`
pub fn new(cert: Option<Vec<u8>>, cert_chain: Option<Vec<u8>>) -> Self {
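
BlobInfo bundles a key blob with its metadata and, for the legacy-import corner case described above, the pre-upgrade blob it supersedes. A construction sketch; the helper and its parameter names are hypothetical, and store_new_key further down in this diff inserts the superseded blob first so the garbage collector can reap it at the KeyMint back end:

// Hedged sketch: building a BlobInfo for an import during which the key blob was upgraded.
fn blob_info_for_upgraded_import<'a>(
    upgraded_blob: &'a [u8],
    upgraded_metadata: &'a BlobMetaData,
    pre_upgrade_blob: &'a [u8],
    pre_upgrade_metadata: &'a BlobMetaData,
) -> BlobInfo<'a> {
    BlobInfo::new_with_superseded(
        upgraded_blob,
        upgraded_metadata,
        Some((pre_upgrade_blob, pre_upgrade_metadata)),
    )
}

// Without an upgrade the plain constructor suffices: BlobInfo::new(blob, metadata).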
@@ -1909,8 +1941,11 @@ impl KeystoreDB {
)?
.collect::<rusqlite::Result<Vec<(i64, DateTime)>>>()
.context("Failed to get date metadata")?;
+ // Calculate curr_time with a look-ahead buffer (EXPIRATION_BUFFER_MS) so that a key
+ // that is only milliseconds away from expiring cannot dodge this delete call.
let curr_time = DateTime::from_millis_epoch(
- SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64,
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS,
);
let mut num_deleted = 0;
for id in key_ids_to_check.iter().filter(|kt| kt.1 < curr_time).map(|kt| kt.0) {
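
A worked example of the check above, with hypothetical timestamps in milliseconds since the epoch:

let now = 1_650_000_000_000i64;             // hypothetical current time
let expiration = now + 15_000;              // key expires 15 s from now
let curr_time = now + EXPIRATION_BUFFER_MS; // the 20 s look-ahead applied above
assert!(expiration < curr_time);            // the key is treated as expired and pruned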
@@ -2019,6 +2054,41 @@ impl KeystoreDB {
.context("In get_attestation_pool_status: ")
}
+ fn query_kid_for_attestation_key_and_cert_chain(
+ &self,
+ tx: &Transaction,
+ domain: Domain,
+ namespace: i64,
+ km_uuid: &Uuid,
+ ) -> Result<Option<i64>> {
+ let mut stmt = tx.prepare(
+ "SELECT id
+ FROM persistent.keyentry
+ WHERE key_type = ?
+ AND domain = ?
+ AND namespace = ?
+ AND state = ?
+ AND km_uuid = ?;",
+ )?;
+ let rows = stmt
+ .query_map(
+ params![
+ KeyType::Attestation,
+ domain.0 as u32,
+ namespace,
+ KeyLifeCycle::Live,
+ km_uuid
+ ],
+ |row| row.get(0),
+ )?
+ .collect::<rusqlite::Result<Vec<i64>>>()
+ .context("query failed.")?;
+ if rows.is_empty() {
+ return Ok(None);
+ }
+ Ok(Some(rows[0]))
+ }
+
/// Fetches the private key and corresponding certificate chain assigned to a
/// domain/namespace pair. Will either return nothing if the domain/namespace is
/// not assigned, or one CertificateChain.
@@ -2027,7 +2097,7 @@ impl KeystoreDB {
domain: Domain,
namespace: i64,
km_uuid: &Uuid,
- ) -> Result<Option<CertificateChain>> {
+ ) -> Result<Option<(KeyIdGuard, CertificateChain)>> {
let _wp = wd::watch_millis("KeystoreDB::retrieve_attestation_key_and_cert_chain", 500);
match domain {
@@ -2037,69 +2107,70 @@ impl KeystoreDB {
.context(format!("Domain {:?} must be either App or SELinux.", domain));
}
}
- self.with_transaction(TransactionBehavior::Deferred, |tx| {
- let mut stmt = tx.prepare(
- "SELECT subcomponent_type, blob
- FROM persistent.blobentry
- WHERE keyentryid IN
- (SELECT id
- FROM persistent.keyentry
- WHERE key_type = ?
- AND domain = ?
- AND namespace = ?
- AND state = ?
- AND km_uuid = ?);",
- )?;
- let rows = stmt
- .query_map(
- params![
- KeyType::Attestation,
- domain.0 as u32,
- namespace,
- KeyLifeCycle::Live,
- km_uuid
- ],
- |row| Ok((row.get(0)?, row.get(1)?)),
- )?
- .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
- .context("query failed.")?;
- if rows.is_empty() {
- return Ok(None).no_gc();
- } else if rows.len() != 3 {
- return Err(KsError::sys()).context(format!(
- concat!(
- "Expected to get a single attestation",
- "key, cert, and cert chain for a total of 3 entries, but instead got {}."
- ),
- rows.len()
- ));
- }
- let mut km_blob: Vec<u8> = Vec::new();
- let mut cert_chain_blob: Vec<u8> = Vec::new();
- let mut batch_cert_blob: Vec<u8> = Vec::new();
- for row in rows {
- let sub_type: SubComponentType = row.0;
- match sub_type {
- SubComponentType::KEY_BLOB => {
- km_blob = row.1;
- }
- SubComponentType::CERT_CHAIN => {
- cert_chain_blob = row.1;
- }
- SubComponentType::CERT => {
- batch_cert_blob = row.1;
- }
- _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
+
+ self.delete_expired_attestation_keys().context(
+ "In retrieve_attestation_key_and_cert_chain: failed to prune expired attestation keys",
+ )?;
+ let tx = self.conn.unchecked_transaction().context(
+ "In retrieve_attestation_key_and_cert_chain: Failed to initialize transaction.",
+ )?;
+ let key_id: i64;
+ match self.query_kid_for_attestation_key_and_cert_chain(&tx, domain, namespace, km_uuid)? {
+ None => return Ok(None),
+ Some(kid) => key_id = kid,
+ }
+ tx.commit()
+ .context("In retrieve_attestation_key_and_cert_chain: Failed to commit keyid query")?;
+ let key_id_guard = KEY_ID_LOCK.get(key_id);
+ let tx = self.conn.unchecked_transaction().context(
+ "In retrieve_attestation_key_and_cert_chain: Failed to initialize transaction.",
+ )?;
+ let mut stmt = tx.prepare(
+ "SELECT subcomponent_type, blob
+ FROM persistent.blobentry
+ WHERE keyentryid = ?;",
+ )?;
+ let rows = stmt
+ .query_map(params![key_id_guard.id()], |row| Ok((row.get(0)?, row.get(1)?)))?
+ .collect::<rusqlite::Result<Vec<(SubComponentType, Vec<u8>)>>>()
+ .context("query failed.")?;
+ if rows.is_empty() {
+ return Ok(None);
+ } else if rows.len() != 3 {
+ return Err(KsError::sys()).context(format!(
+ concat!(
+ "Expected to get a single attestation",
+ "key, cert, and cert chain for a total of 3 entries, but instead got {}."
+ ),
+ rows.len()
+ ));
+ }
+ let mut km_blob: Vec<u8> = Vec::new();
+ let mut cert_chain_blob: Vec<u8> = Vec::new();
+ let mut batch_cert_blob: Vec<u8> = Vec::new();
+ for row in rows {
+ let sub_type: SubComponentType = row.0;
+ match sub_type {
+ SubComponentType::KEY_BLOB => {
+ km_blob = row.1;
+ }
+ SubComponentType::CERT_CHAIN => {
+ cert_chain_blob = row.1;
+ }
+ SubComponentType::CERT => {
+ batch_cert_blob = row.1;
}
+ _ => Err(KsError::sys()).context("Unknown or incorrect subcomponent type.")?,
}
- Ok(Some(CertificateChain {
+ }
+ Ok(Some((
+ key_id_guard,
+ CertificateChain {
private_key: ZVec::try_from(km_blob)?,
batch_cert: batch_cert_blob,
cert_chain: cert_chain_blob,
- }))
- .no_gc()
- })
- .context("In retrieve_attestation_key_and_cert_chain:")
+ },
+ )))
}
/// Updates the alias column of the given key id `newid` with the given alias,
@@ -2233,7 +2304,7 @@ impl KeystoreDB {
key: &KeyDescriptor,
key_type: KeyType,
params: &[KeyParameter],
- blob_info: &(&[u8], &BlobMetaData),
+ blob_info: &BlobInfo,
cert_info: &CertificateInfo,
metadata: &KeyMetaData,
km_uuid: &Uuid,
@@ -2253,7 +2324,27 @@ impl KeystoreDB {
self.with_transaction(TransactionBehavior::Immediate, |tx| {
let key_id = Self::create_key_entry_internal(tx, &domain, namespace, key_type, km_uuid)
.context("Trying to create new key entry.")?;
- let (blob, blob_metadata) = *blob_info;
+ let BlobInfo { blob, metadata: blob_metadata, superseded_blob } = *blob_info;
+
+ // On some occasions the key blob is already upgraded during the import.
+ // In order to make sure it gets properly deleted, it is inserted into the
+ // database here and then immediately replaced by the superseding blob.
+ // The garbage collector will then subject the blob to deleteKey of the
+ // KM back end to permanently invalidate the key.
+ let need_gc = if let Some((blob, blob_metadata)) = superseded_blob {
+ Self::set_blob_internal(
+ tx,
+ key_id.id(),
+ SubComponentType::KEY_BLOB,
+ Some(blob),
+ Some(blob_metadata),
+ )
+ .context("Trying to insert superseded key blob.")?;
+ true
+ } else {
+ false
+ };
+
Self::set_blob_internal(
tx,
key_id.id(),
@@ -2280,7 +2371,8 @@ impl KeystoreDB {
.context("Trying to insert key parameters.")?;
metadata.store_in_db(key_id.id(), tx).context("Trying to insert key metadata.")?;
let need_gc = Self::rebind_alias(tx, &key_id, &alias, &domain, namespace, key_type)
- .context("Trying to rebind alias.")?;
+ .context("Trying to rebind alias.")?
+ || need_gc;
Ok(key_id).do_gc(need_gc)
})
.context("In store_new_key.")
@@ -3235,6 +3327,7 @@ mod tests {
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
+ use crate::utils::AesGcm;
#[cfg(disabled)]
use std::time::Instant;
@@ -3457,7 +3550,10 @@ mod tests {
#[test]
fn test_store_signed_attestation_certificate_chain() -> Result<()> {
let mut db = new_test_db()?;
- let expiration_date: i64 = 20;
+ let expiration_date: i64 =
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS
+ + 10000;
let namespace: i64 = 30;
let base_byte: u8 = 1;
let loaded_values =
@@ -3465,7 +3561,7 @@ mod tests {
let chain =
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
assert_eq!(true, chain.is_some());
- let cert_chain = chain.unwrap();
+ let (_, cert_chain) = chain.unwrap();
assert_eq!(cert_chain.private_key.to_vec(), loaded_values.priv_key);
assert_eq!(cert_chain.batch_cert, loaded_values.batch_cert);
assert_eq!(cert_chain.cert_chain, loaded_values.cert_chain);
@@ -3534,7 +3630,9 @@ mod tests {
TempDir::new("test_remove_expired_certs_").expect("Failed to create temp dir.");
let mut db = new_test_db_with_gc(temp_dir.path(), |_, _| Ok(()))?;
let expiration_date: i64 =
- SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64 + 10000;
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS
+ + 10000;
let namespace: i64 = 30;
let namespace_del1: i64 = 45;
let namespace_del2: i64 = 60;
@@ -3545,7 +3643,7 @@ mod tests {
0x01, /* base_byte */
)?;
load_attestation_key_pool(&mut db, 45, namespace_del1, 0x02)?;
- load_attestation_key_pool(&mut db, 60, namespace_del2, 0x03)?;
+ load_attestation_key_pool(&mut db, expiration_date - 10001, namespace_del2, 0x03)?;
let blob_entry_row_count: u32 = db
.conn
@@ -3560,7 +3658,7 @@ mod tests {
let mut cert_chain =
db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace, &KEYSTORE_UUID)?;
assert!(cert_chain.is_some());
- let value = cert_chain.unwrap();
+ let (_, value) = cert_chain.unwrap();
assert_eq!(entry_values.batch_cert, value.batch_cert);
assert_eq!(entry_values.cert_chain, value.cert_chain);
assert_eq!(entry_values.priv_key, value.private_key.to_vec());
@@ -3592,6 +3690,73 @@ mod tests {
Ok(())
}
+ fn compare_rem_prov_values(
+ expected: &RemoteProvValues,
+ actual: Option<(KeyIdGuard, CertificateChain)>,
+ ) {
+ assert!(actual.is_some());
+ let (_, value) = actual.unwrap();
+ assert_eq!(expected.batch_cert, value.batch_cert);
+ assert_eq!(expected.cert_chain, value.cert_chain);
+ assert_eq!(expected.priv_key, value.private_key.to_vec());
+ }
+
+ #[test]
+ fn test_dont_remove_valid_certs() -> Result<()> {
+ let temp_dir =
+ TempDir::new("test_remove_expired_certs_").expect("Failed to create temp dir.");
+ let mut db = new_test_db_with_gc(temp_dir.path(), |_, _| Ok(()))?;
+ let expiration_date: i64 =
+ SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?.as_millis() as i64
+ + EXPIRATION_BUFFER_MS
+ + 10000;
+ let namespace1: i64 = 30;
+ let namespace2: i64 = 45;
+ let namespace3: i64 = 60;
+ let entry_values1 = load_attestation_key_pool(
+ &mut db,
+ expiration_date,
+ namespace1,
+ 0x01, /* base_byte */
+ )?;
+ let entry_values2 =
+ load_attestation_key_pool(&mut db, expiration_date + 40000, namespace2, 0x02)?;
+ let entry_values3 =
+ load_attestation_key_pool(&mut db, expiration_date - 9000, namespace3, 0x03)?;
+
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // We expect 9 rows here because there are three blobs per attestation key, i.e.,
+ // one key, one certificate chain, and one certificate.
+ assert_eq!(blob_entry_row_count, 9);
+
+ let mut cert_chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace1, &KEYSTORE_UUID)?;
+ compare_rem_prov_values(&entry_values1, cert_chain);
+
+ cert_chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace2, &KEYSTORE_UUID)?;
+ compare_rem_prov_values(&entry_values2, cert_chain);
+
+ cert_chain =
+ db.retrieve_attestation_key_and_cert_chain(Domain::APP, namespace3, &KEYSTORE_UUID)?;
+ compare_rem_prov_values(&entry_values3, cert_chain);
+
+ // Give the garbage collector half a second to catch up.
+ std::thread::sleep(Duration::from_millis(500));
+
+ let blob_entry_row_count: u32 = db
+ .conn
+ .query_row("SELECT COUNT(id) FROM persistent.blobentry;", NO_PARAMS, |row| row.get(0))
+ .expect("Failed to get blob entry row count.");
+ // There should be 9 blob entries left, because all three keys are valid with
+ // three blobs each.
+ assert_eq!(blob_entry_row_count, 9);
+
+ Ok(())
+ }
#[test]
fn test_delete_all_attestation_keys() -> Result<()> {
let mut db = new_test_db()?;
@@ -5561,8 +5726,7 @@ mod tests {
None,
)?;
- let decrypted_secret_bytes =
- loaded_super_key.aes_gcm_decrypt(&encrypted_secret, &iv, &tag)?;
+ let decrypted_secret_bytes = loaded_super_key.decrypt(&encrypted_secret, &iv, &tag)?;
assert_eq!(secret_bytes, &*decrypted_secret_bytes);
Ok(())
diff --git a/keystore2/src/globals.rs b/keystore2/src/globals.rs
index 8212213e..1111d756 100644
--- a/keystore2/src/globals.rs
+++ b/keystore2/src/globals.rs
@@ -18,7 +18,7 @@
use crate::gc::Gc;
use crate::legacy_blob::LegacyBlobLoader;
-use crate::legacy_migrator::LegacyMigrator;
+use crate::legacy_importer::LegacyImporter;
use crate::super_key::SuperKeyManager;
use crate::utils::watchdog as wd;
use crate::utils::Asp;
@@ -158,8 +158,8 @@ lazy_static! {
pub static ref LEGACY_BLOB_LOADER: Arc<LegacyBlobLoader> = Arc::new(LegacyBlobLoader::new(
&DB_PATH.read().expect("Could not get the database path for legacy blob loader.")));
/// Legacy migrator. Atomically migrates legacy blobs to the database.
- pub static ref LEGACY_MIGRATOR: Arc<LegacyMigrator> =
- Arc::new(LegacyMigrator::new(Arc::new(Default::default())));
+ pub static ref LEGACY_IMPORTER: Arc<LegacyImporter> =
+ Arc::new(LegacyImporter::new(Arc::new(Default::default())));
/// Background thread which handles logging via statsd and logd
pub static ref LOGS_HANDLER: Arc<AsyncTask> = Default::default();
diff --git a/keystore2/src/legacy_blob.rs b/keystore2/src/legacy_blob.rs
index 6b16d2e0..8cbcda1d 100644
--- a/keystore2/src/legacy_blob.rs
+++ b/keystore2/src/legacy_blob.rs
@@ -17,8 +17,8 @@
use crate::{
error::{Error as KsError, ResponseCode},
key_parameter::{KeyParameter, KeyParameterValue},
- super_key::SuperKeyManager,
utils::uid_to_android_user,
+ utils::AesGcm,
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
SecurityLevel::SecurityLevel, Tag::Tag, TagType::TagType,
@@ -26,6 +26,7 @@ use android_hardware_security_keymint::aidl::android::hardware::security::keymin
use anyhow::{Context, Result};
use keystore2_crypto::{aes_gcm_decrypt, Password, ZVec};
use std::collections::{HashMap, HashSet};
+use std::sync::Arc;
use std::{convert::TryInto, fs::File, path::Path, path::PathBuf};
use std::{
fs,
@@ -87,6 +88,14 @@ pub enum Error {
/// an invalid alias filename encoding.
#[error("Invalid alias filename encoding.")]
BadEncoding,
+ /// A component of the requested entry other than the KM key blob itself
+ /// was encrypted and no super key was provided.
+ #[error("Locked entry component.")]
+ LockedComponent,
+ /// The uids presented to move_keystore_entry belonged to different
+ /// Android users.
+ #[error("Cannot move keys across Android users.")]
+ AndroidUserMismatch,
}
/// The blob payload, optionally with all information required to decrypt it.
@@ -96,6 +105,16 @@ pub enum BlobValue {
Generic(Vec<u8>),
/// A legacy key characteristics file. This has only a single list of Authorizations.
Characteristics(Vec<u8>),
+ /// A legacy key characteristics file. This has only a single list of Authorizations.
+ /// Additionally, this characteristics file was encrypted with the user's super key.
+ EncryptedCharacteristics {
+ /// Initialization vector.
+ iv: Vec<u8>,
+ /// Aead tag for integrity verification.
+ tag: Vec<u8>,
+ /// Ciphertext.
+ data: Vec<u8>,
+ },
/// A key characteristics cache has both a hardware enforced and a software enforced list
/// of authorizations.
CharacteristicsCache(Vec<u8>),
@@ -124,6 +143,17 @@ pub enum BlobValue {
/// Ciphertext.
data: Vec<u8>,
},
+ /// An encrypted blob. Includes the initialization vector, the aead tag, and the
+ /// ciphertext data. The key can be selected from context, i.e., the owner of the key
+ /// blob. This is a special case for generic encrypted blobs as opposed to key blobs.
+ EncryptedGeneric {
+ /// Initialization vector.
+ iv: Vec<u8>,
+ /// Aead tag for integrity verification.
+ tag: Vec<u8>,
+ /// Ciphertext.
+ data: Vec<u8>,
+ },
/// Holds the plaintext key blob either after unwrapping an encrypted blob or when the
/// blob was stored in "plaintext" on disk. The "plaintext" of a key blob is not actual
/// plaintext because all KeyMint blobs are encrypted with a device bound key. The key
@@ -132,6 +162,19 @@ pub enum BlobValue {
Decrypted(ZVec),
}
+/// Keystore used two different key characteristics file formats in the past:
+/// the key characteristics file and the key characteristics cache, which
+/// superseded it. The file format stored only a single list of key parameters,
+/// while the cache stored both a hardware enforced and a software enforced list.
+/// This enum indicates which type was read from the file system.
+#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)]
+pub enum LegacyKeyCharacteristics {
+ /// A characteristics cache was read.
+ Cache(Vec<KeyParameter>),
+ /// A characteristics file was read.
+ File(Vec<KeyParameter>),
+}
+
/// Represents a loaded legacy key blob file.
#[derive(Debug, Eq, PartialEq)]
pub struct Blob {
@@ -169,6 +212,16 @@ fn read_ne_i64(stream: &mut dyn Read) -> Result<i64> {
}
impl Blob {
+ /// Creates a new blob from flags and value.
+ pub fn new(flags: u8, value: BlobValue) -> Self {
+ Self { flags, value }
+ }
+
+ /// Return the raw flags of this Blob.
+ pub fn get_flags(&self) -> u8 {
+ self.flags
+ }
+
/// This blob was generated with a fallback software KM device.
pub fn is_fallback(&self) -> bool {
self.flags & flags::FALLBACK != 0
@@ -212,10 +265,14 @@ impl LegacyBlobLoader {
// version (1 Byte)
// blob_type (1 Byte)
// flags (1 Byte)
- // info (1 Byte)
+ // info (1 Byte) Size of an info field appended to the blob.
// initialization_vector (16 Bytes)
// integrity (MD5 digest or gcm tag) (16 Bytes)
// length (4 Bytes)
+ //
+ // The info field is used to store the salt for password encrypted blobs.
+ // The beginning of the info field can be computed from the file length
+ // and the info byte from the header: <file length> - <info> bytes.
const COMMON_HEADER_SIZE: usize = 4 + Self::IV_SIZE + Self::GCM_TAG_LENGTH + 4;
const VERSION_OFFSET: usize = 0;
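
A worked example of the layout note above, with hypothetical sizes:

let file_len = 100usize;          // total size of the legacy blob file
let info = 16usize;               // info byte from the header
let salt_start = file_len - info; // a password-encrypted blob's salt occupies file[salt_start..file_len]
assert_eq!(salt_start, 84);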
@@ -341,12 +398,28 @@ impl LegacyBlobLoader {
let tag = &buffer[Self::AEAD_TAG_OFFSET..Self::AEAD_TAG_OFFSET + Self::GCM_TAG_LENGTH];
match (blob_type, is_encrypted, salt) {
- (blob_types::GENERIC, _, _) => {
+ (blob_types::GENERIC, false, _) => {
Ok(Blob { flags, value: BlobValue::Generic(value.to_vec()) })
}
- (blob_types::KEY_CHARACTERISTICS, _, _) => {
+ (blob_types::GENERIC, true, _) => Ok(Blob {
+ flags,
+ value: BlobValue::EncryptedGeneric {
+ iv: iv.to_vec(),
+ tag: tag.to_vec(),
+ data: value.to_vec(),
+ },
+ }),
+ (blob_types::KEY_CHARACTERISTICS, false, _) => {
Ok(Blob { flags, value: BlobValue::Characteristics(value.to_vec()) })
}
+ (blob_types::KEY_CHARACTERISTICS, true, _) => Ok(Blob {
+ flags,
+ value: BlobValue::EncryptedCharacteristics {
+ iv: iv.to_vec(),
+ tag: tag.to_vec(),
+ data: value.to_vec(),
+ },
+ }),
(blob_types::KEY_CHARACTERISTICS_CACHE, _, _) => {
Ok(Blob { flags, value: BlobValue::CharacteristicsCache(value.to_vec()) })
}
@@ -427,6 +500,15 @@ impl LegacyBlobLoader {
.context("In new_from_stream_decrypt_with.")?,
),
}),
+ BlobValue::EncryptedGeneric { iv, tag, data } => Ok(Blob {
+ flags: blob.flags,
+ value: BlobValue::Generic(
+ decrypt(data, iv, tag, None, None)
+ .context("In new_from_stream_decrypt_with.")?[..]
+ .to_vec(),
+ ),
+ }),
+
_ => Ok(blob),
}
}
@@ -546,24 +628,91 @@ impl LegacyBlobLoader {
Ok(params)
}
+ /// This function takes a Blob and an optional AesGcm. Plain text blob variants are
+ /// passed through as is. If a super key is given, an attempt is made to decrypt the
+ /// blob, thereby mapping BlobValue variants as follows:
+ /// BlobValue::Encrypted => BlobValue::Decrypted
+ /// BlobValue::EncryptedGeneric => BlobValue::Generic
+ /// BlobValue::EncryptedCharacteristics => BlobValue::Characteristics
+ /// If no super key is given or BlobValue::PwEncrypted is encountered,
+ /// Err(Error::LockedComponent) is returned.
+ fn decrypt_if_required(super_key: &Option<Arc<dyn AesGcm>>, blob: Blob) -> Result<Blob> {
+ match blob {
+ Blob { value: BlobValue::Generic(_), .. }
+ | Blob { value: BlobValue::Characteristics(_), .. }
+ | Blob { value: BlobValue::CharacteristicsCache(_), .. }
+ | Blob { value: BlobValue::Decrypted(_), .. } => Ok(blob),
+ Blob { value: BlobValue::EncryptedCharacteristics { iv, tag, data }, flags }
+ if super_key.is_some() =>
+ {
+ Ok(Blob {
+ value: BlobValue::Characteristics(
+ super_key.as_ref().unwrap().decrypt(&data, &iv, &tag).context(
+ "In decrypt_if_required: Failed to decrypt EncryptedCharacteristics",
+ )?[..]
+ .to_vec(),
+ ),
+ flags,
+ })
+ }
+ Blob { value: BlobValue::Encrypted { iv, tag, data }, flags }
+ if super_key.is_some() =>
+ {
+ Ok(Blob {
+ value: BlobValue::Decrypted(
+ super_key
+ .as_ref()
+ .unwrap()
+ .decrypt(&data, &iv, &tag)
+ .context("In decrypt_if_required: Failed to decrypt Encrypted")?,
+ ),
+ flags,
+ })
+ }
+ Blob { value: BlobValue::EncryptedGeneric { iv, tag, data }, flags }
+ if super_key.is_some() =>
+ {
+ Ok(Blob {
+ value: BlobValue::Generic(
+ super_key
+ .as_ref()
+ .unwrap()
+ .decrypt(&data, &iv, &tag)
+ .context("In decrypt_if_required: Failed to decrypt EncryptedGeneric")?[..]
+ .to_vec(),
+ ),
+ flags,
+ })
+ }
+ // This arm catches all encrypted cases where the super key is not present or cannot
+ // decrypt the blob, the latter being the case for BlobValue::PwEncrypted.
+ _ => Err(Error::LockedComponent)
+ .context("In decrypt_if_required: Encountered encrypted blob without super key."),
+ }
+ }
+
fn read_characteristics_file(
&self,
uid: u32,
prefix: &str,
alias: &str,
hw_sec_level: SecurityLevel,
- ) -> Result<Vec<KeyParameter>> {
+ super_key: &Option<Arc<dyn AesGcm>>,
+ ) -> Result<LegacyKeyCharacteristics> {
let blob = Self::read_generic_blob(&self.make_chr_filename(uid, alias, prefix))
.context("In read_characteristics_file")?;
let blob = match blob {
- None => return Ok(Vec::new()),
+ None => return Ok(LegacyKeyCharacteristics::Cache(Vec::new())),
Some(blob) => blob,
};
- let mut stream = match blob.value() {
- BlobValue::Characteristics(data) => &data[..],
- BlobValue::CharacteristicsCache(data) => &data[..],
+ let blob = Self::decrypt_if_required(super_key, blob)
+ .context("In read_characteristics_file: Trying to decrypt blob.")?;
+
+ let (mut stream, is_cache) = match blob.value() {
+ BlobValue::Characteristics(data) => (&data[..], false),
+ BlobValue::CharacteristicsCache(data) => (&data[..], true),
_ => {
return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(concat!(
"In read_characteristics_file: ",
@@ -589,7 +738,12 @@ impl LegacyBlobLoader {
.into_iter()
.map(|value| KeyParameter::new(value, SecurityLevel::KEYSTORE));
- Ok(hw_list.into_iter().flatten().chain(sw_list).collect())
+ let params: Vec<KeyParameter> = hw_list.into_iter().flatten().chain(sw_list).collect();
+ if is_cache {
+ Ok(LegacyKeyCharacteristics::Cache(params))
+ } else {
+ Ok(LegacyKeyCharacteristics::File(params))
+ }
}
// This is a list of known prefixes that the Keystore 1.0 SPI used to use.
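
read_characteristics_file now reports which on-disk format it parsed via LegacyKeyCharacteristics. A minimal caller-side sketch; the helper params_of is hypothetical, and both variants carry the parsed parameters:

fn params_of(characteristics: LegacyKeyCharacteristics) -> Vec<KeyParameter> {
    match characteristics {
        LegacyKeyCharacteristics::Cache(params) => params, // characteristics cache: merged HW and SW lists
        LegacyKeyCharacteristics::File(params) => params,  // older single-list characteristics file
    }
}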
@@ -639,14 +793,40 @@ impl LegacyBlobLoader {
Ok(Some(Self::new_from_stream(&mut file).context("In read_generic_blob.")?))
}
+ fn read_generic_blob_decrypt_with<F>(path: &Path, decrypt: F) -> Result<Option<Blob>>
+ where
+ F: FnOnce(&[u8], &[u8], &[u8], Option<&[u8]>, Option<usize>) -> Result<ZVec>,
+ {
+ let mut file = match Self::with_retry_interrupted(|| File::open(path)) {
+ Ok(file) => file,
+ Err(e) => match e.kind() {
+ ErrorKind::NotFound => return Ok(None),
+ _ => return Err(e).context("In read_generic_blob_decrypt_with."),
+ },
+ };
+
+ Ok(Some(
+ Self::new_from_stream_decrypt_with(&mut file, decrypt)
+ .context("In read_generic_blob_decrypt_with.")?,
+ ))
+ }
+
/// Read a legacy keystore entry blob.
- pub fn read_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<Option<Vec<u8>>> {
+ pub fn read_legacy_keystore_entry<F>(
+ &self,
+ uid: u32,
+ alias: &str,
+ decrypt: F,
+ ) -> Result<Option<Vec<u8>>>
+ where
+ F: FnOnce(&[u8], &[u8], &[u8], Option<&[u8]>, Option<usize>) -> Result<ZVec>,
+ {
let path = match self.make_legacy_keystore_entry_filename(uid, alias) {
Some(path) => path,
None => return Ok(None),
};
- let blob = Self::read_generic_blob(&path)
+ let blob = Self::read_generic_blob_decrypt_with(&path, decrypt)
.context("In read_legacy_keystore_entry: Failed to read blob.")?;
Ok(blob.and_then(|blob| match blob.value {
@@ -659,22 +839,23 @@ impl LegacyBlobLoader {
}
/// Remove a legacy keystore entry by the name alias with owner uid.
- pub fn remove_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<()> {
+ pub fn remove_legacy_keystore_entry(&self, uid: u32, alias: &str) -> Result<bool> {
let path = match self.make_legacy_keystore_entry_filename(uid, alias) {
Some(path) => path,
- None => return Ok(()),
+ None => return Ok(false),
};
if let Err(e) = Self::with_retry_interrupted(|| fs::remove_file(path.as_path())) {
match e.kind() {
- ErrorKind::NotFound => return Ok(()),
+ ErrorKind::NotFound => return Ok(false),
_ => return Err(e).context("In remove_legacy_keystore_entry."),
}
}
let user_id = uid_to_android_user(uid);
self.remove_user_dir_if_empty(user_id)
- .context("In remove_legacy_keystore_entry: Trying to remove empty user dir.")
+ .context("In remove_legacy_keystore_entry: Trying to remove empty user dir.")?;
+ Ok(true)
}
/// List all entries belonging to the given uid.
@@ -1004,79 +1185,66 @@ impl LegacyBlobLoader {
&self,
uid: u32,
alias: &str,
- key_manager: Option<&SuperKeyManager>,
- ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
+ super_key: &Option<Arc<dyn AesGcm>>,
+ ) -> Result<(Option<(Blob, LegacyKeyCharacteristics)>, Option<Vec<u8>>, Option<Vec<u8>>)> {
let km_blob = self.read_km_blob_file(uid, alias).context("In load_by_uid_alias.")?;
let km_blob = match km_blob {
Some((km_blob, prefix)) => {
- let km_blob = match km_blob {
- Blob { flags: _, value: BlobValue::Decrypted(_) } => km_blob,
- // Unwrap the key blob if required and if we have key_manager.
- Blob { flags, value: BlobValue::Encrypted { ref iv, ref tag, ref data } } => {
- if let Some(key_manager) = key_manager {
- let decrypted = match key_manager
- .get_per_boot_key_by_user_id(uid_to_android_user(uid))
- {
- Some(key) => key.aes_gcm_decrypt(data, iv, tag).context(
- "In load_by_uid_alias: while trying to decrypt legacy blob.",
- )?,
- None => {
- return Err(KsError::Rc(ResponseCode::LOCKED)).context(format!(
- concat!(
- "In load_by_uid_alias: ",
- "User {} has not unlocked the keystore yet.",
- ),
- uid_to_android_user(uid)
- ))
- }
- };
- Blob { flags, value: BlobValue::Decrypted(decrypted) }
- } else {
- km_blob
- }
- }
- _ => {
- return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
+ let km_blob =
+ match km_blob {
+ Blob { flags: _, value: BlobValue::Decrypted(_) }
+ | Blob { flags: _, value: BlobValue::Encrypted { .. } } => km_blob,
+ _ => return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
"In load_by_uid_alias: Found wrong blob type in legacy key blob file.",
- )
- }
- };
+ ),
+ };
let hw_sec_level = match km_blob.is_strongbox() {
true => SecurityLevel::STRONGBOX,
false => SecurityLevel::TRUSTED_ENVIRONMENT,
};
let key_parameters = self
- .read_characteristics_file(uid, &prefix, alias, hw_sec_level)
+ .read_characteristics_file(uid, &prefix, alias, hw_sec_level, super_key)
.context("In load_by_uid_alias.")?;
Some((km_blob, key_parameters))
}
None => None,
};
- let user_cert =
- match Self::read_generic_blob(&self.make_blob_filename(uid, alias, "USRCERT"))
- .context("In load_by_uid_alias: While loading user cert.")?
- {
- Some(Blob { value: BlobValue::Generic(data), .. }) => Some(data),
- None => None,
- _ => {
- return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED)).context(
- "In load_by_uid_alias: Found unexpected blob type in USRCERT file",
- )
- }
- };
+ let user_cert_blob =
+ Self::read_generic_blob(&self.make_blob_filename(uid, alias, "USRCERT"))
+ .context("In load_by_uid_alias: While loading user cert.")?;
- let ca_cert = match Self::read_generic_blob(&self.make_blob_filename(uid, alias, "CACERT"))
- .context("In load_by_uid_alias: While loading ca cert.")?
- {
- Some(Blob { value: BlobValue::Generic(data), .. }) => Some(data),
- None => None,
- _ => {
+ let user_cert = if let Some(blob) = user_cert_blob {
+ let blob = Self::decrypt_if_required(super_key, blob)
+ .context("In load_by_uid_alias: While decrypting user cert.")?;
+
+ if let Blob { value: BlobValue::Generic(data), .. } = blob {
+ Some(data)
+ } else {
+ return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED))
+ .context("In load_by_uid_alias: Found unexpected blob type in USRCERT file");
+ }
+ } else {
+ None
+ };
+
+ let ca_cert_blob = Self::read_generic_blob(&self.make_blob_filename(uid, alias, "CACERT"))
+ .context("In load_by_uid_alias: While loading ca cert.")?;
+
+ let ca_cert = if let Some(blob) = ca_cert_blob {
+ let blob = Self::decrypt_if_required(super_key, blob)
+ .context("In load_by_uid_alias: While decrypting ca cert.")?;
+
+ if let Blob { value: BlobValue::Generic(data), .. } = blob {
+ Some(data)
+ } else {
return Err(KsError::Rc(ResponseCode::VALUE_CORRUPTED))
- .context("In load_by_uid_alias: Found unexpected blob type in CACERT file")
+ .context("In load_by_uid_alias: Found unexpected blob type in CACERT file");
}
+ } else {
+ None
};
Ok((km_blob, user_cert, ca_cert))
@@ -1139,15 +1307,271 @@ impl LegacyBlobLoader {
#[cfg(test)]
mod test {
+ #![allow(dead_code)]
use super::*;
- use anyhow::anyhow;
- use keystore2_crypto::aes_gcm_decrypt;
+ use keystore2_crypto::{aes_gcm_decrypt, aes_gcm_encrypt};
use rand::Rng;
use std::string::FromUtf8Error;
mod legacy_blob_test_vectors;
- use crate::error;
+ use crate::legacy_blob::blob_types::{
+ GENERIC, KEY_CHARACTERISTICS, KEY_CHARACTERISTICS_CACHE, KM_BLOB, SUPER_KEY,
+ SUPER_KEY_AES256,
+ };
use crate::legacy_blob::test::legacy_blob_test_vectors::*;
+ use anyhow::{anyhow, Result};
use keystore2_test_utils::TempDir;
+ use std::convert::TryInto;
+ use std::fs::OpenOptions;
+ use std::io::Write;
+ use std::ops::Deref;
+
+ /// This function takes a blob and synchronizes the encrypted/super-encrypted flags
+ /// with the blob type for the pairs Generic/EncryptedGeneric,
+ /// Characteristics/EncryptedCharacteristics and Encrypted/Decrypted.
+ /// E.g. if a non-encrypted enum variant is encountered while flags::SUPER_ENCRYPTED
+ /// or flags::ENCRYPTED is set, the payload is encrypted and the corresponding
+ /// encrypted variant is returned, and vice versa. All other variants remain untouched
+ /// even if the flags and the BlobValue variant are inconsistent.
+ fn prepare_blob(blob: Blob, key: &[u8]) -> Result<Blob> {
+ match blob {
+ Blob { value: BlobValue::Generic(data), flags } if blob.is_encrypted() => {
+ let (ciphertext, iv, tag) = aes_gcm_encrypt(&data, key).unwrap();
+ Ok(Blob { value: BlobValue::EncryptedGeneric { data: ciphertext, iv, tag }, flags })
+ }
+ Blob { value: BlobValue::Characteristics(data), flags } if blob.is_encrypted() => {
+ let (ciphertext, iv, tag) = aes_gcm_encrypt(&data, key).unwrap();
+ Ok(Blob {
+ value: BlobValue::EncryptedCharacteristics { data: ciphertext, iv, tag },
+ flags,
+ })
+ }
+ Blob { value: BlobValue::Decrypted(data), flags } if blob.is_encrypted() => {
+ let (ciphertext, iv, tag) = aes_gcm_encrypt(&data, key).unwrap();
+ Ok(Blob { value: BlobValue::Encrypted { data: ciphertext, iv, tag }, flags })
+ }
+ Blob { value: BlobValue::EncryptedGeneric { data, iv, tag }, flags }
+ if !blob.is_encrypted() =>
+ {
+ let plaintext = aes_gcm_decrypt(&data, &iv, &tag, key).unwrap();
+ Ok(Blob { value: BlobValue::Generic(plaintext[..].to_vec()), flags })
+ }
+ Blob { value: BlobValue::EncryptedCharacteristics { data, iv, tag }, flags }
+ if !blob.is_encrypted() =>
+ {
+ let plaintext = aes_gcm_decrypt(&data, &iv, &tag, key).unwrap();
+ Ok(Blob { value: BlobValue::Characteristics(plaintext[..].to_vec()), flags })
+ }
+ Blob { value: BlobValue::Encrypted { data, iv, tag }, flags }
+ if !blob.is_encrypted() =>
+ {
+ let plaintext = aes_gcm_decrypt(&data, &iv, &tag, key).unwrap();
+ Ok(Blob { value: BlobValue::Decrypted(plaintext), flags })
+ }
+ _ => Ok(blob),
+ }
+ }
+
+ struct LegacyBlobHeader {
+ version: u8,
+ blob_type: u8,
+ flags: u8,
+ info: u8,
+ iv: [u8; 12],
+ tag: [u8; 16],
+ blob_size: u32,
+ }
+
+ /// This function takes a Blob and writes it to out as a legacy blob file
+ /// version 3. Note that the flags field and the values field may be
+ /// inconsistent and could be sanitized by this function. It is intentionally
+ /// not done to enable tests to construct malformed blobs.
+ fn write_legacy_blob(out: &mut dyn Write, blob: Blob) -> Result<usize> {
+ let (header, data, salt) = match blob {
+ Blob { value: BlobValue::Generic(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: GENERIC,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::Characteristics(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KEY_CHARACTERISTICS,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::CharacteristicsCache(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KEY_CHARACTERISTICS_CACHE,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::PwEncrypted { iv, tag, data, salt, key_size }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: if key_size == keystore2_crypto::AES_128_KEY_LENGTH {
+ SUPER_KEY
+ } else {
+ SUPER_KEY_AES256
+ },
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ Some(salt),
+ ),
+ Blob { value: BlobValue::Encrypted { iv, tag, data }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KM_BLOB,
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::EncryptedGeneric { iv, tag, data }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: GENERIC,
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::EncryptedCharacteristics { iv, tag, data }, flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KEY_CHARACTERISTICS,
+ flags,
+ info: 0,
+ iv: iv.try_into().unwrap(),
+ tag: tag[..].try_into().unwrap(),
+ blob_size: data.len() as u32,
+ },
+ data,
+ None,
+ ),
+ Blob { value: BlobValue::Decrypted(data), flags } => (
+ LegacyBlobHeader {
+ version: 3,
+ blob_type: KM_BLOB,
+ flags,
+ info: 0,
+ iv: [0u8; 12],
+ tag: [0u8; 16],
+ blob_size: data.len() as u32,
+ },
+ data[..].to_vec(),
+ None,
+ ),
+ };
+ write_legacy_blob_helper(out, &header, &data, salt.as_deref())
+ }
+
+ fn write_legacy_blob_helper(
+ out: &mut dyn Write,
+ header: &LegacyBlobHeader,
+ data: &[u8],
+ info: Option<&[u8]>,
+ ) -> Result<usize> {
+ if 1 != out.write(&[header.version])? {
+ return Err(anyhow!("Unexpected size while writing version."));
+ }
+ if 1 != out.write(&[header.blob_type])? {
+ return Err(anyhow!("Unexpected size while writing blob_type."));
+ }
+ if 1 != out.write(&[header.flags])? {
+ return Err(anyhow!("Unexpected size while writing flags."));
+ }
+ if 1 != out.write(&[header.info])? {
+ return Err(anyhow!("Unexpected size while writing info."));
+ }
+ if 12 != out.write(&header.iv)? {
+ return Err(anyhow!("Unexpected size while writing iv."));
+ }
+ if 4 != out.write(&[0u8; 4])? {
+ return Err(anyhow!("Unexpected size while writing last 4 bytes of iv."));
+ }
+ if 16 != out.write(&header.tag)? {
+ return Err(anyhow!("Unexpected size while writing tag."));
+ }
+ if 4 != out.write(&header.blob_size.to_be_bytes())? {
+ return Err(anyhow!("Unexpected size while writing blob size."));
+ }
+ if data.len() != out.write(data)? {
+ return Err(anyhow!("Unexpected size while writing blob."));
+ }
+ if let Some(info) = info {
+ if info.len() != out.write(info)? {
+ return Err(anyhow!("Unexpected size while writing info."));
+ }
+ }
+ Ok(40 + data.len() + info.map(|v| v.len()).unwrap_or(0))
+ }
+
+ fn make_encrypted_characteristics_file<P: AsRef<Path>>(path: P, key: &[u8]) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob = Blob {
+ value: BlobValue::Characteristics(KEY_PARAMETERS.to_vec()),
+ flags: flags::ENCRYPTED,
+ };
+ let blob = prepare_blob(blob, key).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
+
+ fn make_encrypted_usr_cert_file<P: AsRef<Path>>(path: P, key: &[u8]) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob = Blob {
+ value: BlobValue::Generic(LOADED_CERT_AUTHBOUND.to_vec()),
+ flags: flags::ENCRYPTED,
+ };
+ let blob = prepare_blob(blob, key).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
+
+ fn make_encrypted_ca_cert_file<P: AsRef<Path>>(path: P, key: &[u8]) -> Result<()> {
+ let mut file = OpenOptions::new().write(true).create_new(true).open(path).unwrap();
+ let blob = Blob {
+ value: BlobValue::Generic(LOADED_CACERT_AUTHBOUND.to_vec()),
+ flags: flags::ENCRYPTED,
+ };
+ let blob = prepare_blob(blob, key).unwrap();
+ write_legacy_blob(&mut file, blob).unwrap();
+ Ok(())
+ }
#[test]
fn decode_encode_alias_test() {
@@ -1203,7 +1627,8 @@ mod test {
fn read_golden_key_blob_test() -> anyhow::Result<()> {
let blob = LegacyBlobLoader::new_from_stream_decrypt_with(&mut &*BLOB, |_, _, _, _, _| {
Err(anyhow!("should not be called"))
- })?;
+ })
+ .unwrap();
assert!(!blob.is_encrypted());
assert!(!blob.is_fallback());
assert!(!blob.is_strongbox());
@@ -1213,7 +1638,8 @@ mod test {
let blob = LegacyBlobLoader::new_from_stream_decrypt_with(
&mut &*REAL_LEGACY_BLOB,
|_, _, _, _, _| Err(anyhow!("should not be called")),
- )?;
+ )
+ .unwrap();
assert!(!blob.is_encrypted());
assert!(!blob.is_fallback());
assert!(!blob.is_strongbox());
@@ -1301,62 +1727,75 @@ mod test {
#[test]
fn test_legacy_blobs() -> anyhow::Result<()> {
- let temp_dir = TempDir::new("legacy_blob_test")?;
- std::fs::create_dir(&*temp_dir.build().push("user_0"))?;
+ let temp_dir = TempDir::new("legacy_blob_test").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
- std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY)?;
+ std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY).unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRPKEY_authbound"),
USRPKEY_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_authbound"),
USRPKEY_AUTHBOUND_CHR,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRCERT_authbound"),
USRCERT_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_CACERT_authbound"),
CACERT_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRPKEY_non_authbound"),
USRPKEY_NON_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_non_authbound"),
USRPKEY_NON_AUTHBOUND_CHR,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_USRCERT_non_authbound"),
USRCERT_NON_AUTHBOUND,
- )?;
+ )
+ .unwrap();
std::fs::write(
&*temp_dir.build().push("user_0").push("10223_CACERT_non_authbound"),
CACERT_NON_AUTHBOUND,
- )?;
+ )
+ .unwrap();
- let key_manager: SuperKeyManager = Default::default();
- let mut db = crate::database::KeystoreDB::new(temp_dir.path(), None)?;
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
- assert_eq!(
- legacy_blob_loader
- .load_by_uid_alias(10223, "authbound", Some(&key_manager))
- .unwrap_err()
- .root_cause()
- .downcast_ref::<error::Error>(),
- Some(&error::Error::Rc(ResponseCode::LOCKED))
- );
-
- key_manager.unlock_user_key(&mut db, 0, &(PASSWORD.into()), &legacy_blob_loader)?;
+ if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain)) =
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None)?
+ {
+ assert_eq!(flags, 4);
+ assert_eq!(
+ value,
+ BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ );
+ assert_eq!(&cert[..], LOADED_CERT_AUTHBOUND);
+ assert_eq!(&chain[..], LOADED_CACERT_AUTHBOUND);
+ } else {
+ panic!("");
+ }
if let (Some((Blob { flags, value: _ }, _params)), Some(cert), Some(chain)) =
- legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None)?
{
assert_eq!(flags, 4);
//assert_eq!(value, BlobValue::Encrypted(..));
@@ -1366,7 +1805,7 @@ mod test {
panic!("");
}
if let (Some((Blob { flags, value }, _params)), Some(cert), Some(chain)) =
- legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", &None)?
{
assert_eq!(flags, 0);
assert_eq!(value, BlobValue::Decrypted(LOADED_USRPKEY_NON_AUTHBOUND.try_into()?));
@@ -1383,11 +1822,11 @@ mod test {
assert_eq!(
(None, None, None),
- legacy_blob_loader.load_by_uid_alias(10223, "authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None)?
);
assert_eq!(
(None, None, None),
- legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", Some(&key_manager))?
+ legacy_blob_loader.load_by_uid_alias(10223, "non_authbound", &None)?
);
// The database should not be empty due to the super key.
@@ -1406,9 +1845,196 @@ mod test {
Ok(())
}
+ struct TestKey(ZVec);
+
+ impl crate::utils::AesGcmKey for TestKey {
+ fn key(&self) -> &[u8] {
+ &self.0
+ }
+ }
+
+ impl Deref for TestKey {
+ type Target = [u8];
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+ }
+
+ #[test]
+ fn test_with_encrypted_characteristics() -> anyhow::Result<()> {
+ let temp_dir = TempDir::new("test_with_encrypted_characteristics").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
+
+ let pw: Password = PASSWORD.into();
+ let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
+ let super_key =
+ Arc::new(TestKey(pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap()));
+
+ std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY).unwrap();
+
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND,
+ )
+ .unwrap();
+ make_encrypted_characteristics_file(
+ &*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_authbound"),
+ &super_key,
+ )
+ .unwrap();
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_USRCERT_authbound"),
+ USRCERT_AUTHBOUND,
+ )
+ .unwrap();
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_CACERT_authbound"),
+ CACERT_AUTHBOUND,
+ )
+ .unwrap();
+
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert_eq!(
+ legacy_blob_loader
+ .load_by_uid_alias(10223, "authbound", &None)
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<Error>(),
+ Some(&Error::LockedComponent)
+ );
+
+ assert_eq!(
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &Some(super_key)).unwrap(),
+ (
+ Some((
+ Blob {
+ flags: 4,
+ value: BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ },
+ structured_test_params()
+ )),
+ Some(LOADED_CERT_AUTHBOUND.to_vec()),
+ Some(LOADED_CACERT_AUTHBOUND.to_vec())
+ )
+ );
+
+ legacy_blob_loader.remove_keystore_entry(10223, "authbound").expect("This should succeed.");
+
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None).unwrap()
+ );
+
+ // The database should not be empty due to the super key.
+ assert!(!legacy_blob_loader.is_empty().unwrap());
+ assert!(!legacy_blob_loader.is_empty_user(0).unwrap());
+
+ // The database should be considered empty for user 1.
+ assert!(legacy_blob_loader.is_empty_user(1).unwrap());
+
+ legacy_blob_loader.remove_super_key(0);
+
+ // Now it should be empty.
+ assert!(legacy_blob_loader.is_empty_user(0).unwrap());
+ assert!(legacy_blob_loader.is_empty().unwrap());
+
+ Ok(())
+ }
+
+ #[test]
+ fn test_with_encrypted_certificates() -> anyhow::Result<()> {
+ let temp_dir = TempDir::new("test_with_encrypted_certificates").unwrap();
+ std::fs::create_dir(&*temp_dir.build().push("user_0")).unwrap();
+
+ let pw: Password = PASSWORD.into();
+ let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
+ let super_key =
+ Arc::new(TestKey(pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap()));
+
+ std::fs::write(&*temp_dir.build().push("user_0").push(".masterkey"), SUPERKEY).unwrap();
+
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push("10223_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND,
+ )
+ .unwrap();
+ std::fs::write(
+ &*temp_dir.build().push("user_0").push(".10223_chr_USRPKEY_authbound"),
+ USRPKEY_AUTHBOUND_CHR,
+ )
+ .unwrap();
+ make_encrypted_usr_cert_file(
+ &*temp_dir.build().push("user_0").push("10223_USRCERT_authbound"),
+ &super_key,
+ )
+ .unwrap();
+ make_encrypted_ca_cert_file(
+ &*temp_dir.build().push("user_0").push("10223_CACERT_authbound"),
+ &super_key,
+ )
+ .unwrap();
+
+ let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
+
+ assert_eq!(
+ legacy_blob_loader
+ .load_by_uid_alias(10223, "authbound", &None)
+ .unwrap_err()
+ .root_cause()
+ .downcast_ref::<Error>(),
+ Some(&Error::LockedComponent)
+ );
+
+ assert_eq!(
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &Some(super_key)).unwrap(),
+ (
+ Some((
+ Blob {
+ flags: 4,
+ value: BlobValue::Encrypted {
+ data: USRPKEY_AUTHBOUND_ENC_PAYLOAD.to_vec(),
+ iv: USRPKEY_AUTHBOUND_IV.to_vec(),
+ tag: USRPKEY_AUTHBOUND_TAG.to_vec()
+ }
+ },
+ structured_test_params_cache()
+ )),
+ Some(LOADED_CERT_AUTHBOUND.to_vec()),
+ Some(LOADED_CACERT_AUTHBOUND.to_vec())
+ )
+ );
+
+ legacy_blob_loader.remove_keystore_entry(10223, "authbound").expect("This should succeed.");
+
+ assert_eq!(
+ (None, None, None),
+ legacy_blob_loader.load_by_uid_alias(10223, "authbound", &None).unwrap()
+ );
+
+ // The database should not be empty due to the super key.
+ assert!(!legacy_blob_loader.is_empty().unwrap());
+ assert!(!legacy_blob_loader.is_empty_user(0).unwrap());
+
+ // The database should be considered empty for user 1.
+ assert!(legacy_blob_loader.is_empty_user(1).unwrap());
+
+ legacy_blob_loader.remove_super_key(0);
+
+ // Now it should be empty.
+ assert!(legacy_blob_loader.is_empty_user(0).unwrap());
+ assert!(legacy_blob_loader.is_empty().unwrap());
+
+ Ok(())
+ }
+
#[test]
fn list_non_existing_user() -> Result<()> {
- let temp_dir = TempDir::new("list_non_existing_user")?;
+ let temp_dir = TempDir::new("list_non_existing_user").unwrap();
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
assert!(legacy_blob_loader.list_user(20)?.is_empty());
@@ -1418,7 +2044,7 @@ mod test {
#[test]
fn list_legacy_keystore_entries_on_non_existing_user() -> Result<()> {
- let temp_dir = TempDir::new("list_legacy_keystore_entries_on_non_existing_user")?;
+ let temp_dir = TempDir::new("list_legacy_keystore_entries_on_non_existing_user").unwrap();
let legacy_blob_loader = LegacyBlobLoader::new(temp_dir.path());
assert!(legacy_blob_loader.list_legacy_keystore_entries_for_user(20)?.is_empty());
diff --git a/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs b/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
index 14bd40ca..2049ac2c 100644
--- a/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
+++ b/keystore2/src/legacy_blob/test/legacy_blob_test_vectors.rs
@@ -12,6 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+use crate::key_parameter::{KeyParameter, KeyParameterValue};
+use crate::legacy_blob::LegacyKeyCharacteristics;
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ Algorithm::Algorithm, Digest::Digest, EcCurve::EcCurve,
+ HardwareAuthenticatorType::HardwareAuthenticatorType, KeyOrigin::KeyOrigin,
+ KeyPurpose::KeyPurpose, SecurityLevel::SecurityLevel,
+};
+
pub static BLOB: &[u8] = &[
3, // version
1, // type
@@ -22,6 +30,106 @@ pub static BLOB: &[u8] = &[
0, 0, 0, 4, // length in big endian
0xde, 0xed, 0xbe, 0xef, // payload
];
+
+pub fn structured_test_params() -> LegacyKeyCharacteristics {
+ LegacyKeyCharacteristics::File(vec![
+ KeyParameter::new(KeyParameterValue::KeyPurpose(KeyPurpose::SIGN), SecurityLevel::KEYSTORE),
+ KeyParameter::new(
+ KeyParameterValue::KeyPurpose(KeyPurpose::VERIFY),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::Digest(Digest::SHA_2_256), SecurityLevel::KEYSTORE),
+ KeyParameter::new(
+ KeyParameterValue::UserSecureID(2100322049669824240),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::Algorithm(Algorithm::EC), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::KeySize(256), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::EcCurve(EcCurve::P_256), SecurityLevel::KEYSTORE),
+ KeyParameter::new(
+ KeyParameterValue::HardwareAuthenticatorType(HardwareAuthenticatorType::FINGERPRINT),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::KeyOrigin(KeyOrigin::GENERATED),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::OSVersion(110000), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::OSPatchLevel(202101), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::BootPatchLevel(20210105), SecurityLevel::KEYSTORE),
+ KeyParameter::new(KeyParameterValue::VendorPatchLevel(20210105), SecurityLevel::KEYSTORE),
+ ])
+}
+
+pub fn structured_test_params_cache() -> LegacyKeyCharacteristics {
+ LegacyKeyCharacteristics::Cache(vec![
+ KeyParameter::new(
+ KeyParameterValue::KeyPurpose(KeyPurpose::SIGN),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::KeyPurpose(KeyPurpose::VERIFY),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::Digest(Digest::SHA_2_256),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::UserSecureID(2100322049669824240),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::Algorithm(Algorithm::EC),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(KeyParameterValue::KeySize(256), SecurityLevel::TRUSTED_ENVIRONMENT),
+ KeyParameter::new(
+ KeyParameterValue::EcCurve(EcCurve::P_256),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::HardwareAuthenticatorType(HardwareAuthenticatorType::FINGERPRINT),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::KeyOrigin(KeyOrigin::GENERATED),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(KeyParameterValue::OSVersion(110000), SecurityLevel::TRUSTED_ENVIRONMENT),
+ KeyParameter::new(
+ KeyParameterValue::OSPatchLevel(202101),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::BootPatchLevel(20210105),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::VendorPatchLevel(20210105),
+ SecurityLevel::TRUSTED_ENVIRONMENT,
+ ),
+ KeyParameter::new(
+ KeyParameterValue::CreationDateTime(1607149002000),
+ SecurityLevel::KEYSTORE,
+ ),
+ KeyParameter::new(KeyParameterValue::UserID(0), SecurityLevel::KEYSTORE),
+ ])
+}
+
+// One encoded list of key parameters.
+pub static KEY_PARAMETERS: &[u8] = &[
+ 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x20,
+ 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x20, 0x03, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x20,
+ 0x04, 0x00, 0x00, 0x00, 0xf6, 0x01, 0x00, 0xa0, 0xf0, 0x7e, 0x7d, 0xb4, 0xc6, 0xd7, 0x25, 0x1d,
+ 0x02, 0x00, 0x00, 0x10, 0x03, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x30, 0x00, 0x01, 0x00, 0x00,
+ 0x0a, 0x00, 0x00, 0x10, 0x01, 0x00, 0x00, 0x00, 0x2d, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+ 0xf8, 0x01, 0x00, 0x10, 0x02, 0x00, 0x00, 0x00, 0xbe, 0x02, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+ 0xc1, 0x02, 0x00, 0x30, 0xb0, 0xad, 0x01, 0x00, 0xc2, 0x02, 0x00, 0x30, 0x75, 0x15, 0x03, 0x00,
+ 0xcf, 0x02, 0x00, 0x30, 0xb9, 0x61, 0x34, 0x01, 0xce, 0x02, 0x00, 0x30, 0xb9, 0x61, 0x34, 0x01,
+ 0x30, 0x01, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,
+];
+
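// Illustrative sketch (not part of this change): a rough reading of the
// KEY_PARAMETERS layout above, assuming the serialized form is
// <indirect-data-size u32 LE> <element-count u32 LE> <elements-size u32 LE>
// followed by <tag u32 LE><value> pairs, with the value width implied by the
// tag's type nibble. The helper name is hypothetical.
fn peek_param_set_header(buf: &[u8]) -> (u32, u32, u32) {
    let u32_le = |b: &[u8]| u32::from_le_bytes([b[0], b[1], b[2], b[3]]);
    (u32_le(&buf[0..4]), u32_le(&buf[4..8]), u32_le(&buf[8..12]))
}
// For KEY_PARAMETERS this would yield (0, 15, 124): no indirect data, fifteen
// elements, and 124 bytes of tag/value data. The first element, tag
// 0x20000001 with value 2, corresponds to the KeyPurpose::SIGN entry returned
// by structured_test_params() above.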
pub static REAL_LEGACY_BLOB: &[u8] = &[
0x03, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -164,6 +272,24 @@ pub static SUPERKEY: &[u8] = &[
0x76, 0x04, 0x2a, 0x48, 0xd1, 0xa7, 0x59, 0xd1, 0x04, 0x5b, 0xb4, 0x8a, 0x09, 0x22, 0x13, 0x0c,
0x94, 0xb6, 0x67, 0x7b, 0x39, 0x85, 0x28, 0x11,
];
+
+pub static SUPERKEY_IV: &[u8] = &[
+ 0x9a, 0x81, 0x56, 0x7d, 0xf5, 0x86, 0x7c, 0x62, 0xd7, 0xf9, 0x26, 0x06, 0x00, 0x00, 0x00, 0x00,
+];
+
+pub static SUPERKEY_TAG: &[u8] = &[
+ 0xde, 0x2a, 0xcb, 0xac, 0x98, 0x57, 0x2b, 0xe5, 0x57, 0x18, 0x78, 0x57, 0x6e, 0x10, 0x09, 0x84,
+];
+
+pub static SUPERKEY_SALT: &[u8] = &[
+ 0x04, 0x5b, 0xb4, 0x8a, 0x09, 0x22, 0x13, 0x0c, 0x94, 0xb6, 0x67, 0x7b, 0x39, 0x85, 0x28, 0x11,
+];
+
+pub static SUPERKEY_PAYLOAD: &[u8] = &[
+ 0xac, 0x6d, 0x13, 0xe6, 0xad, 0x2c, 0x89, 0x53, 0x1a, 0x99, 0xa5, 0x6c, 0x88, 0xe9, 0xeb, 0x5c,
+ 0xef, 0x68, 0x5e, 0x5b, 0x53, 0xa8, 0xe7, 0xa2, 0x76, 0x04, 0x2a, 0x48, 0xd1, 0xa7, 0x59, 0xd1,
+];
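// Illustrative note (not part of this change): these four statics dissect the
// SUPERKEY blob above. The salt appears to be its trailing 16 bytes, and the
// test_with_encrypted_certificates test in legacy_blob.rs derives the wrapping
// key from PASSWORD with this salt and then AES-GCM-decrypts SUPERKEY_PAYLOAD
// using SUPERKEY_IV and SUPERKEY_TAG, roughly:
//     let pw_key = TestKey(pw.derive_key(Some(SUPERKEY_SALT), 32).unwrap());
//     let super_key = pw_key.decrypt(SUPERKEY_PAYLOAD, SUPERKEY_IV, SUPERKEY_TAG).unwrap();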
+
pub static USRPKEY_AUTHBOUND: &[u8] = &[
0x03, 0x04, 0x04, 0x00, 0x1c, 0x34, 0x87, 0x6f, 0xc8, 0x35, 0x0d, 0x34, 0x88, 0x59, 0xbc, 0xf5,
0x00, 0x00, 0x00, 0x00, 0x62, 0xe3, 0x38, 0x2d, 0xd0, 0x58, 0x40, 0xc1, 0xb0, 0xf2, 0x4a, 0xdd,
@@ -203,6 +329,52 @@ pub static USRPKEY_AUTHBOUND: &[u8] = &[
0xaf, 0x17, 0x2f, 0x21, 0x07, 0xea, 0x61, 0xff, 0x73, 0x08, 0x50, 0xb2, 0x19, 0xe8, 0x23, 0x1b,
0x83, 0x42, 0xdd, 0x4e, 0x6d,
];
+
+pub static USRPKEY_AUTHBOUND_IV: &[u8] = &[
+ 0x1c, 0x34, 0x87, 0x6f, 0xc8, 0x35, 0x0d, 0x34, 0x88, 0x59, 0xbc, 0xf5, 0x00, 0x00, 0x00, 0x00,
+];
+
+pub static USRPKEY_AUTHBOUND_TAG: &[u8] = &[
+ 0x62, 0xe3, 0x38, 0x2d, 0xd0, 0x58, 0x40, 0xc1, 0xb0, 0xf2, 0x4a, 0xdd, 0xf7, 0x81, 0x67, 0x0b,
+];
+
+pub static USRPKEY_AUTHBOUND_ENC_PAYLOAD: &[u8] = &[
+ 0x05, 0xb2, 0x5a, 0x1d, 0x1b, 0x25, 0x19, 0x48, 0xbf, 0x76, 0x0b, 0x37, 0x8c, 0x60, 0x52, 0xea,
+ 0x30, 0x2a, 0x2c, 0x89, 0x99, 0x95, 0x57, 0x5c, 0xec, 0x62, 0x3c, 0x08, 0x1a, 0xc6, 0x65, 0xf9,
+ 0xad, 0x24, 0x99, 0xf0, 0x5c, 0x44, 0xa0, 0xea, 0x9a, 0x60, 0xa2, 0xef, 0xf5, 0x27, 0x50, 0xba,
+ 0x9c, 0xef, 0xa6, 0x08, 0x88, 0x4b, 0x0f, 0xfe, 0x5d, 0x41, 0xac, 0xba, 0xef, 0x9d, 0xa4, 0xb7,
+ 0x72, 0xd3, 0xc8, 0x11, 0x92, 0x06, 0xf6, 0x26, 0xdf, 0x90, 0xe2, 0x66, 0x89, 0xf3, 0x85, 0x16,
+ 0x4a, 0xdf, 0x7f, 0xac, 0x94, 0x4a, 0x1c, 0xce, 0x18, 0xee, 0xf4, 0x1f, 0x8e, 0xd6, 0xaf, 0xfd,
+ 0x1d, 0xe5, 0x80, 0x4a, 0x6b, 0xbf, 0x91, 0xe2, 0x36, 0x1d, 0xb3, 0x53, 0x12, 0xfd, 0xc9, 0x0b,
+ 0xa6, 0x69, 0x00, 0x45, 0xcb, 0x4c, 0x40, 0x6b, 0x70, 0xcb, 0xd2, 0xa0, 0x44, 0x0b, 0x4b, 0xec,
+ 0xd6, 0x4f, 0x6f, 0x64, 0x37, 0xa7, 0xc7, 0x25, 0x54, 0xf4, 0xac, 0x6b, 0x34, 0x53, 0xea, 0x4e,
+ 0x56, 0x49, 0xba, 0xf4, 0x1e, 0xc6, 0x52, 0x8f, 0xf4, 0x85, 0xe7, 0xb5, 0xaf, 0x49, 0x68, 0xb3,
+ 0xb8, 0x7d, 0x63, 0xfc, 0x6e, 0x83, 0xa0, 0xf3, 0x91, 0x04, 0x80, 0xfd, 0xc5, 0x54, 0x7e, 0x92,
+ 0x1a, 0x87, 0x2c, 0x6e, 0xa6, 0x29, 0xb9, 0x1e, 0x3f, 0xef, 0x30, 0x12, 0x7b, 0x2f, 0xa2, 0x16,
+ 0x61, 0x8a, 0xcf, 0x14, 0x2d, 0x62, 0x98, 0x15, 0xae, 0x3b, 0xe6, 0x08, 0x1e, 0xb1, 0xf1, 0x21,
+ 0xb0, 0x50, 0xc0, 0x4b, 0x81, 0x71, 0x29, 0xe7, 0x86, 0xbf, 0x29, 0xe1, 0xeb, 0xfe, 0xbc, 0x11,
+ 0x3c, 0xc6, 0x15, 0x47, 0x9b, 0x41, 0x84, 0x61, 0x33, 0xbf, 0xca, 0xfe, 0x24, 0x92, 0x9e, 0x70,
+ 0x26, 0x36, 0x46, 0xca, 0xfe, 0xd3, 0x5a, 0x1d, 0x9e, 0x30, 0x19, 0xbd, 0x26, 0x49, 0xb4, 0x90,
+ 0x0c, 0x8d, 0xa2, 0x28, 0xa6, 0x24, 0x62, 0x6b, 0xe2, 0xfa, 0xe0, 0x53, 0xaa, 0x01, 0xeb, 0xaa,
+ 0x41, 0x2b, 0xcb, 0xb1, 0x08, 0x66, 0x9d, 0x21, 0x2d, 0x2a, 0x47, 0x44, 0xee, 0xd5, 0x06, 0xe3,
+ 0x4a, 0xb9, 0x3f, 0xcd, 0x78, 0x67, 0x89, 0x5b, 0xf7, 0x51, 0xc0, 0xc4, 0xa9, 0x68, 0xee, 0x44,
+ 0x9c, 0x47, 0xa4, 0xbd, 0x6f, 0x7b, 0xdd, 0x64, 0xa8, 0xc7, 0x1e, 0x77, 0x1d, 0x68, 0x87, 0xaa,
+ 0xae, 0x3c, 0xfc, 0x58, 0xb6, 0x3c, 0xcf, 0x58, 0xd0, 0x10, 0xaa, 0xef, 0xf0, 0x98, 0x67, 0x14,
+ 0x29, 0x4d, 0x40, 0x8b, 0xe5, 0xb1, 0xdf, 0x7f, 0x40, 0xb1, 0xd8, 0xea, 0x6c, 0xa8, 0xf7, 0x64,
+ 0xed, 0x02, 0x8d, 0xe7, 0x93, 0xfe, 0x79, 0x9a, 0x88, 0x62, 0x4f, 0xd0, 0x8a, 0x80, 0x36, 0x42,
+ 0x0a, 0xf1, 0xa2, 0x0e, 0x30, 0x39, 0xbd, 0x26, 0x1d, 0xd4, 0xf1, 0xc8, 0x6e, 0xdd, 0xc5, 0x41,
+ 0x29, 0xd8, 0xc1, 0x9e, 0x24, 0xf0, 0x25, 0x07, 0x05, 0x06, 0xc5, 0x08, 0xe3, 0x02, 0x2b, 0xe1,
+ 0x40, 0xc5, 0x67, 0xd2, 0x82, 0x96, 0x20, 0x80, 0xcf, 0x87, 0x3a, 0xc6, 0xb0, 0xbe, 0xcc, 0xbb,
+ 0x5a, 0x01, 0xab, 0xdd, 0x00, 0xc7, 0x0e, 0x7b, 0x02, 0x35, 0x27, 0xf4, 0x70, 0xfe, 0xd1, 0x19,
+ 0x6a, 0x64, 0x23, 0x9d, 0xba, 0xe9, 0x1d, 0x76, 0x90, 0xfe, 0x7f, 0xd6, 0xb5, 0xa0, 0xe7, 0xb9,
+ 0xf3, 0x56, 0x82, 0x8e, 0x57, 0x35, 0xf2, 0x69, 0xce, 0x52, 0xac, 0xc2, 0xf6, 0x5e, 0xb6, 0x54,
+ 0x95, 0x83, 0x3b, 0x9f, 0x48, 0xbb, 0x04, 0x06, 0xac, 0x55, 0xa9, 0xb9, 0xa3, 0xe7, 0x89, 0x6e,
+ 0x5c, 0x3a, 0x08, 0x67, 0x00, 0x8f, 0x1e, 0x26, 0x1b, 0x4d, 0x8a, 0xa6, 0x17, 0xa0, 0xa6, 0x18,
+ 0xe6, 0x31, 0x43, 0x15, 0xb8, 0x7f, 0x9e, 0xf5, 0x78, 0x58, 0x98, 0xb1, 0x8c, 0xf5, 0x22, 0x42,
+ 0x33, 0xc0, 0x42, 0x72, 0x4f, 0xce, 0x9f, 0x31, 0xaf, 0x17, 0x2f, 0x21, 0x07, 0xea, 0x61, 0xff,
+ 0x73, 0x08, 0x50, 0xb2, 0x19, 0xe8, 0x23, 0x1b, 0x83, 0x42, 0xdd, 0x4e, 0x6d,
+];
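// Illustrative note (not part of this change): these statics are slices of the
// USRPKEY_AUTHBOUND blob above. With the version-3 blob layout the IV appears
// to sit directly after the four header bytes, with the 16-byte GCM tag right
// behind it, i.e. roughly:
//     assert_eq!(USRPKEY_AUTHBOUND_IV, &USRPKEY_AUTHBOUND[4..20]);
//     assert_eq!(USRPKEY_AUTHBOUND_TAG, &USRPKEY_AUTHBOUND[20..36]);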
+
pub static USRPKEY_AUTHBOUND_CHR: &[u8] = &[
0x03, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/keystore2/src/legacy_migrator.rs b/keystore2/src/legacy_importer.rs
index f92fd459..ee5a13f0 100644
--- a/keystore2/src/legacy_migrator.rs
+++ b/keystore2/src/legacy_importer.rs
@@ -14,19 +14,23 @@
//! This module acts as a bridge between the legacy key database and the keystore2 database.
-use crate::key_parameter::KeyParameterValue;
-use crate::legacy_blob::BlobValue;
-use crate::utils::{uid_to_android_user, watchdog as wd};
+use crate::database::{
+ BlobInfo, BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
+ KeyMetaEntry, KeyType, KeystoreDB, Uuid, KEYSTORE_UUID,
+};
+use crate::error::{map_km_error, Error};
+use crate::key_parameter::{KeyParameter, KeyParameterValue};
+use crate::legacy_blob::{self, Blob, BlobValue, LegacyKeyCharacteristics};
+use crate::super_key::USER_SUPER_KEY;
+use crate::utils::{
+ key_characteristics_to_internal, uid_to_android_user, upgrade_keyblob_if_required_with,
+ watchdog as wd, AesGcm,
+};
use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
-use crate::{database::KeyType, error::Error};
-use crate::{
- database::{
- BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
- KeyMetaEntry, KeystoreDB, Uuid, KEYSTORE_UUID,
- },
- super_key::USER_SUPER_KEY,
+use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
+ IKeyMintDevice::IKeyMintDevice, SecurityLevel::SecurityLevel,
};
-use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
+use android_hardware_security_keymint::binder::Strong;
use android_system_keystore2::aidl::android::system::keystore2::{
Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
};
@@ -34,12 +38,13 @@ use anyhow::{Context, Result};
use core::ops::Deref;
use keystore2_crypto::{Password, ZVec};
use std::collections::{HashMap, HashSet};
+use std::convert::TryInto;
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};
-/// Represents LegacyMigrator.
-pub struct LegacyMigrator {
+/// Represents LegacyImporter.
+pub struct LegacyImporter {
async_task: Arc<AsyncTask>,
initializer: Mutex<
Option<
@@ -51,19 +56,19 @@ pub struct LegacyMigrator {
>,
>,
/// This atomic is used for cheap interior mutability. It is intended to prevent
- /// expensive calls into the legacy migrator when the legacy database is empty.
+ /// expensive calls into the legacy importer when the legacy database is empty.
/// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
/// of time. This is tolerable in favor of the common case.
state: AtomicU8,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-struct RecentMigration {
+struct RecentImport {
uid: u32,
alias: String,
}
-impl RecentMigration {
+impl RecentImport {
fn new(uid: u32, alias: String) -> Self {
Self { uid, alias }
}
@@ -74,15 +79,15 @@ enum BulkDeleteRequest {
User(u32),
}
-struct LegacyMigratorState {
- recently_migrated: HashSet<RecentMigration>,
- recently_migrated_super_key: HashSet<u32>,
+struct LegacyImporterState {
+ recently_imported: HashSet<RecentImport>,
+ recently_imported_super_key: HashSet<u32>,
legacy_loader: Arc<LegacyBlobLoader>,
sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
db: KeystoreDB,
}
-impl LegacyMigrator {
+impl LegacyImporter {
const WIFI_NAMESPACE: i64 = 102;
const AID_WIFI: u32 = 1010;
@@ -90,7 +95,7 @@ impl LegacyMigrator {
const STATE_READY: u8 = 1;
const STATE_EMPTY: u8 = 2;
- /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
+ /// Constructs a new LegacyImporter using the given AsyncTask object as import
/// worker.
pub fn new(async_task: Arc<AsyncTask>) -> Self {
Self {
@@ -100,7 +105,7 @@ impl LegacyMigrator {
}
}
- /// The legacy migrator must be initialized deferred, because keystore starts very early.
+ /// The legacy importer must be initialized deferred, because keystore starts very early.
/// At this time the data partition may not be mounted. So we cannot open database connections
/// until we get actual key load requests. This sets the function that the legacy loader
/// uses to connect to the database.
@@ -125,11 +130,11 @@ impl LegacyMigrator {
Ok(())
}
- /// This function is called by the migration requestor to check if it is worth
- /// making a migration request. It also transitions the state from UNINITIALIZED
+ /// This function is called by the import requestor to check if it is worth
+ /// making an import request. It also transitions the state from UNINITIALIZED
/// to READY or EMPTY on first use. The deferred initialization is necessary, because
/// Keystore 2.0 runs early during boot, where data may not yet be mounted.
- /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
+ /// Returns Ok(STATE_READY) if an import request is worth undertaking and
/// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
/// was not initialized and cannot be initialized.
fn check_state(&self) -> Result<u8> {
@@ -157,9 +162,9 @@ impl LegacyMigrator {
}
self.async_task.queue_hi(move |shelf| {
- shelf.get_or_put_with(|| LegacyMigratorState {
- recently_migrated: Default::default(),
- recently_migrated_super_key: Default::default(),
+ shelf.get_or_put_with(|| LegacyImporterState {
+ recently_imported: Default::default(),
+ recently_imported_super_key: Default::default(),
legacy_loader,
sec_level_to_km_uuid,
db,
@@ -189,14 +194,14 @@ impl LegacyMigrator {
);
}
(Self::STATE_READY, _) => return Ok(Self::STATE_READY),
- (s, _) => panic!("Unknown legacy migrator state. {} ", s),
+ (s, _) => panic!("Unknown legacy importer state. {} ", s),
}
}
}
/// List all aliases for uid in the legacy database.
pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
- let _wp = wd::watch_millis("LegacyMigrator::list_uid", 500);
+ let _wp = wd::watch_millis("LegacyImporter::list_uid", 500);
let uid = match (domain, namespace) {
(Domain::APP, namespace) => namespace as u32,
@@ -217,44 +222,44 @@ impl LegacyMigrator {
)
}
- /// Sends the given closure to the migrator thread for execution after calling check_state.
+ /// Sends the given closure to the importer thread for execution after calling check_state.
/// Returns None if the database was empty and the request was not executed.
- /// Otherwise returns Some with the result produced by the migration request.
+ /// Otherwise returns Some with the result produced by the import request.
/// The loader state may transition to STATE_EMPTY during the execution of this function.
fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
where
- F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
+ F: FnOnce(&mut LegacyImporterState) -> Result<T> + Send + 'static,
{
// Short circuit if the database is empty or not initialized (error case).
match self.check_state().context("In do_serialized: Checking state.") {
- Ok(LegacyMigrator::STATE_EMPTY) => return None,
- Ok(LegacyMigrator::STATE_READY) => {}
+ Ok(LegacyImporter::STATE_EMPTY) => return None,
+ Ok(LegacyImporter::STATE_READY) => {}
Err(e) => return Some(Err(e)),
- Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
+ Ok(s) => panic!("Unknown legacy importer state. {} ", s),
}
// We have established that there may be a key in the legacy database.
- // Now we schedule a migration request.
+ // Now we schedule an import request.
let (sender, receiver) = channel();
self.async_task.queue_hi(move |shelf| {
- // Get the migrator state from the shelf.
- // There may not be a state. This can happen if this migration request was scheduled
+ // Get the importer state from the shelf.
+ // There may not be a state. This can happen if this import request was scheduled
// before a previous request established that the legacy database was empty
// and removed the state from the shelf. Since we know now that the database
// is empty, we can return None here.
- let (new_state, result) = if let Some(legacy_migrator_state) =
- shelf.get_downcast_mut::<LegacyMigratorState>()
+ let (new_state, result) = if let Some(legacy_importer_state) =
+ shelf.get_downcast_mut::<LegacyImporterState>()
{
- let result = f(legacy_migrator_state);
- (legacy_migrator_state.check_empty(), Some(result))
+ let result = f(legacy_importer_state);
+ (legacy_importer_state.check_empty(), Some(result))
} else {
(Self::STATE_EMPTY, None)
};
- // If the migration request determined that the database is now empty, we discard
+ // If the import request determined that the database is now empty, we discard
// the state from the shelf to free up the resources we won't need any longer.
if result.is_some() && new_state == Self::STATE_EMPTY {
- shelf.remove_downcast_ref::<LegacyMigratorState>();
+ shelf.remove_downcast_ref::<LegacyImporterState>();
}
// Send the result to the requester.
@@ -271,7 +276,7 @@ impl LegacyMigrator {
};
// We can only transition to EMPTY but never back.
- // The migrator never creates any legacy blobs.
+ // The importer never creates any legacy blobs.
if new_state == Self::STATE_EMPTY {
self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
}
@@ -280,19 +285,20 @@ impl LegacyMigrator {
}
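// Illustrative sketch (not part of this change): the hand-off pattern that
// do_serialized builds on, reduced to its core. A closure is queued on the
// async task's high-priority queue, its result travels back over an mpsc
// channel, and the caller blocks on recv(). The helper below is hypothetical,
// assumes the AsyncTask import of this module, and omits the shelf-backed
// importer state and the READY/EMPTY bookkeeping.
fn run_on_worker<T: Send + 'static>(
    async_task: &AsyncTask,
    f: impl FnOnce() -> T + Send + 'static,
) -> T {
    let (sender, receiver) = std::sync::mpsc::channel();
    async_task.queue_hi(move |_shelf| {
        // A send error would mean the requester gave up waiting; ignore it.
        let _ = sender.send(f());
    });
    receiver.recv().expect("Worker thread dropped the sender.")
}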
/// Runs the key_accessor function and returns its result. If it returns an error and the
- /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
+ /// root cause was KEY_NOT_FOUND, tries to import a key with the given parameters from
/// the legacy database to the new database and runs the key_accessor function again if
- /// the migration request was successful.
- pub fn with_try_migrate<F, T>(
+ /// the import request was successful.
+ pub fn with_try_import<F, T>(
&self,
key: &KeyDescriptor,
caller_uid: u32,
+ super_key: Option<Arc<dyn AesGcm + Send + Sync>>,
key_accessor: F,
) -> Result<T>
where
F: Fn() -> Result<T>,
{
- let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate", 500);
+ let _wp = wd::watch_millis("LegacyImporter::with_try_import", 500);
// Access the key and return on success.
match key_accessor() {
@@ -304,7 +310,7 @@ impl LegacyMigrator {
}
// Filter inputs. We can only load legacy app domain keys and some special rules due
- // to which we migrate keys transparently to an SELINUX domain.
+ // to which we import keys transparently to an SELINUX domain.
let uid = match key {
KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
@@ -323,12 +329,14 @@ impl LegacyMigrator {
};
let key_clone = key.clone();
- let result = self
- .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));
+ let result = self.do_serialized(move |importer_state| {
+ let super_key = super_key.map(|sk| -> Arc<dyn AesGcm> { sk });
+ importer_state.check_and_import(uid, key_clone, super_key)
+ });
if let Some(result) = result {
result?;
- // After successful migration try again.
+ // After successful import try again.
key_accessor()
} else {
Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
@@ -336,8 +344,8 @@ impl LegacyMigrator {
}
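// Illustrative sketch (not part of this change): how a caller combines
// with_try_import with a database accessor, mirroring the call sites in
// service.rs further down in this change. load_key_id_from_db is a
// hypothetical stand-in for the real db.load_key_entry call.
fn load_with_import_fallback(
    importer: &LegacyImporter,
    key: &KeyDescriptor,
    caller_uid: u32,
    super_key: Option<Arc<dyn AesGcm + Send + Sync>>,
) -> Result<i64> {
    importer.with_try_import(key, caller_uid, super_key, || {
        // Runs once; if it fails with KEY_NOT_FOUND and a legacy blob is
        // imported successfully, it is run a second time.
        load_key_id_from_db(key)
    })
}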
/// Calls key_accessor and returns the result on success. In the case of a KEY_NOT_FOUND error
- /// this function makes a migration request and on success retries the key_accessor.
- pub fn with_try_migrate_super_key<F, T>(
+ /// this function makes an import request and on success retries the key_accessor.
+ pub fn with_try_import_super_key<F, T>(
&self,
user_id: u32,
pw: &Password,
@@ -346,31 +354,31 @@ impl LegacyMigrator {
where
F: FnMut() -> Result<Option<T>>,
{
- let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate_super_key", 500);
+ let _wp = wd::watch_millis("LegacyImporter::with_try_import_super_key", 500);
match key_accessor() {
Ok(Some(result)) => return Ok(Some(result)),
Ok(None) => {}
Err(e) => return Err(e),
}
- let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
- let result = self.do_serialized(move |migrator_state| {
- migrator_state.check_and_migrate_super_key(user_id, &pw)
+ let pw = pw.try_clone().context("In with_try_import_super_key: Cloning password.")?;
+ let result = self.do_serialized(move |importer_state| {
+ importer_state.check_and_import_super_key(user_id, &pw)
});
if let Some(result) = result {
result?;
- // After successful migration try again.
+ // After successful import try again.
key_accessor()
} else {
Ok(None)
}
}
- /// Deletes all keys belonging to the given namespace, migrating them into the database
+ /// Deletes all keys belonging to the given namespace, importing them into the database
/// for subsequent garbage collection if necessary.
pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
- let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_uid", 500);
+ let _wp = wd::watch_millis("LegacyImporter::bulk_delete_uid", 500);
let uid = match (domain, nspace) {
(Domain::APP, nspace) => nspace as u32,
@@ -379,24 +387,24 @@ impl LegacyMigrator {
_ => return Ok(()),
};
- let result = self.do_serialized(move |migrator_state| {
- migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
+ let result = self.do_serialized(move |importer_state| {
+ importer_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
});
result.unwrap_or(Ok(()))
}
- /// Deletes all keys belonging to the given android user, migrating them into the database
+ /// Deletes all keys belonging to the given android user, importing them into the database
/// for subsequent garbage collection if necessary.
pub fn bulk_delete_user(
&self,
user_id: u32,
keep_non_super_encrypted_keys: bool,
) -> Result<()> {
- let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_user", 500);
+ let _wp = wd::watch_millis("LegacyImporter::bulk_delete_user", 500);
- let result = self.do_serialized(move |migrator_state| {
- migrator_state
+ let result = self.do_serialized(move |importer_state| {
+ importer_state
.bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
});
@@ -406,12 +414,12 @@ impl LegacyMigrator {
/// Queries the legacy database for the presence of a super key for the given user.
pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
let result =
- self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
+ self.do_serialized(move |importer_state| importer_state.has_super_key(user_id));
result.unwrap_or(Ok(false))
}
}
-impl LegacyMigratorState {
+impl LegacyImporterState {
fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
let sec_level = if is_strongbox {
SecurityLevel::STRONGBOX
@@ -430,17 +438,174 @@ impl LegacyMigratorState {
.context("In list_uid: Trying to list legacy entries.")
}
- /// This is a key migration request that must run in the migrator thread. This must
+ /// Checks whether the key can potentially be unlocked, and deletes the key entry otherwise.
+ /// If the super_key has already been imported, the super key database id is returned.
+ fn get_super_key_id_check_unlockable_or_delete(
+ &mut self,
+ uid: u32,
+ alias: &str,
+ ) -> Result<i64> {
+ let user_id = uid_to_android_user(uid);
+
+ match self
+ .db
+ .load_super_key(&USER_SUPER_KEY, user_id)
+ .context("In get_super_key_id_check_unlockable_or_delete: Failed to load super key")?
+ {
+ Some((_, entry)) => Ok(entry.id()),
+ None => {
+ // This might be the first time we access the super key,
+ // and it may not have been imported yet. We cannot import
+ // the legacy super key now, because importing requires
+ // reencrypting it, which is only possible while the user is
+ // unlocked; if the user were unlocked, the key would already
+ // have been imported. We can, however, check whether the key
+ // exists. If it does, we return Locked. Otherwise, we delete
+ // the key and return NotFound, because the key will never be
+ // unlocked again.
+ if self.legacy_loader.has_super_key(user_id) {
+ Err(Error::Rc(ResponseCode::LOCKED)).context(
+ "In get_super_key_id_check_unlockable_or_delete: \
+ Cannot import the super key for this key while the user is locked.",
+ )
+ } else {
+ self.legacy_loader.remove_keystore_entry(uid, alias).context(
+ "In get_super_key_id_check_unlockable_or_delete: \
+ Trying to remove obsolete key.",
+ )?;
+ Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
+ .context("In get_super_key_id_check_unlockable_or_delete: Obsolete key.")
+ }
+ }
+ }
+ }
+
+ fn characteristics_file_to_cache(
+ &mut self,
+ km_blob_params: Option<(Blob, LegacyKeyCharacteristics)>,
+ super_key: &Option<Arc<dyn AesGcm>>,
+ uid: u32,
+ alias: &str,
+ ) -> Result<(Option<(Blob, Vec<KeyParameter>)>, Option<(LegacyBlob<'static>, BlobMetaData)>)>
+ {
+ let (km_blob, params) = match km_blob_params {
+ Some((km_blob, LegacyKeyCharacteristics::File(params))) => (km_blob, params),
+ Some((km_blob, LegacyKeyCharacteristics::Cache(params))) => {
+ return Ok((Some((km_blob, params)), None))
+ }
+ None => return Ok((None, None)),
+ };
+
+ let km_uuid = self
+ .get_km_uuid(km_blob.is_strongbox())
+ .context("In characteristics_file_to_cache: Trying to get KM UUID")?;
+
+ let blob = match (&km_blob.value(), super_key.as_ref()) {
+ (BlobValue::Encrypted { iv, tag, data }, Some(super_key)) => {
+ let blob = super_key
+ .decrypt(data, iv, tag)
+ .context("In characteristics_file_to_cache: Decryption failed.")?;
+ LegacyBlob::ZVec(blob)
+ }
+ (BlobValue::Encrypted { .. }, None) => {
+ return Err(Error::Rc(ResponseCode::LOCKED)).context(
+ "In characteristics_file_to_cache: Oh uh, so close. \
+ This ancient key cannot be imported unless the user is unlocked.",
+ );
+ }
+ (BlobValue::Decrypted(data), _) => LegacyBlob::Ref(data),
+ _ => {
+ return Err(Error::sys())
+ .context("In characteristics_file_to_cache: Unexpected blob type.")
+ }
+ };
+
+ let (km_params, upgraded_blob) = get_key_characteristics_without_app_data(&km_uuid, &*blob)
+ .context(
+ "In characteristics_file_to_cache: Failed to get key characteristics from device.",
+ )?;
+
+ let flags = km_blob.get_flags();
+
+ let (current_blob, superseded_blob) = if let Some(upgraded_blob) = upgraded_blob {
+ match (km_blob.take_value(), super_key.as_ref()) {
+ (BlobValue::Encrypted { iv, tag, data }, Some(super_key)) => {
+ let super_key_id =
+ self.get_super_key_id_check_unlockable_or_delete(uid, alias).context(
+ "In characteristics_file_to_cache: \
+ How is there a super key but no super key id?",
+ )?;
+
+ let mut superseded_metadata = BlobMetaData::new();
+ superseded_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
+ superseded_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
+ superseded_metadata
+ .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
+ superseded_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+ let superseded_blob = (LegacyBlob::Vec(data), superseded_metadata);
+
+ let (data, iv, tag) = super_key.encrypt(&upgraded_blob).context(
+ "In characteristics_file_to_cache: \
+ Failed to encrypt upgraded key blob.",
+ )?;
+ (
+ Blob::new(flags, BlobValue::Encrypted { data, iv, tag }),
+ Some(superseded_blob),
+ )
+ }
+ (BlobValue::Encrypted { .. }, None) => {
+ return Err(Error::sys()).context(
+ "In characteristics_file_to_cache: This should not be reachable. \
+ The blob could not have been decrypted above.",
+ );
+ }
+ (BlobValue::Decrypted(data), _) => {
+ let mut superseded_metadata = BlobMetaData::new();
+ superseded_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
+ let superseded_blob = (LegacyBlob::ZVec(data), superseded_metadata);
+ (
+ Blob::new(
+ flags,
+ BlobValue::Decrypted(upgraded_blob.try_into().context(
+ "In characteristics_file_to_cache: \
+ Failed to convert upgraded blob to ZVec.",
+ )?),
+ ),
+ Some(superseded_blob),
+ )
+ }
+ _ => {
+ return Err(Error::sys()).context(
+ "In characteristics_file_to_cache: This should not be reachable. \
+ Any other variant should have resulted in a different error.",
+ )
+ }
+ }
+ } else {
+ (km_blob, None)
+ };
+
+ let params =
+ augment_legacy_characteristics_file_with_key_characteristics(km_params, params);
+ Ok((Some((current_blob, params)), superseded_blob))
+ }
+
+ /// This is a key import request that must run in the importer thread. This must
/// be passed to do_serialized.
- fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
+ fn check_and_import(
+ &mut self,
+ uid: u32,
+ mut key: KeyDescriptor,
+ super_key: Option<Arc<dyn AesGcm>>,
+ ) -> Result<()> {
let alias = key.alias.clone().ok_or_else(|| {
- anyhow::anyhow!(Error::sys()).context(concat!(
- "In check_and_migrate: Must be Some because ",
- "our caller must not have called us otherwise."
- ))
+ anyhow::anyhow!(Error::sys()).context(
+ "In check_and_import: Must be Some because \
+ our caller must not have called us otherwise.",
+ )
})?;
- if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
+ if self.recently_imported.contains(&RecentImport::new(uid, alias.clone())) {
return Ok(());
}
@@ -451,49 +616,42 @@ impl LegacyMigratorState {
// If the key is not found in the cache, try to load from the legacy database.
let (km_blob_params, user_cert, ca_cert) = self
.legacy_loader
- .load_by_uid_alias(uid, &alias, None)
- .context("In check_and_migrate: Trying to load legacy blob.")?;
+ .load_by_uid_alias(uid, &alias, &super_key)
+ .map_err(|e| {
+ if e.root_cause().downcast_ref::<legacy_blob::Error>()
+ == Some(&legacy_blob::Error::LockedComponent)
+ {
+ // There is no chance to succeed at this point. We just check if there is
+ // a super key so that this entry might be unlockable in the future.
+ // If not, the entry will be deleted and KEY_NOT_FOUND is returned.
+ // If a super key id was returned we still have to return LOCKED but the key
+ // may be imported when the user unlocks the device.
+ self.get_super_key_id_check_unlockable_or_delete(uid, &alias)
+ .and_then::<i64, _>(|_| {
+ Err(Error::Rc(ResponseCode::LOCKED))
+ .context("Super key present but locked.")
+ })
+ .unwrap_err()
+ } else {
+ e
+ }
+ })
+ .context("In check_and_import: Trying to load legacy blob.")?;
+
+ let (km_blob_params, superseded_blob) = self
+ .characteristics_file_to_cache(km_blob_params, &super_key, uid, &alias)
+ .context("In check_and_import: Trying to update legacy characteristics.")?;
+
let result = match km_blob_params {
Some((km_blob, params)) => {
let is_strongbox = km_blob.is_strongbox();
+
let (blob, mut blob_metadata) = match km_blob.take_value() {
BlobValue::Encrypted { iv, tag, data } => {
// Get super key id for user id.
- let user_id = uid_to_android_user(uid as u32);
-
- let super_key_id = match self
- .db
- .load_super_key(&USER_SUPER_KEY, user_id)
- .context("In check_and_migrate: Failed to load super key")?
- {
- Some((_, entry)) => entry.id(),
- None => {
- // This might be the first time we access the super key,
- // and it may not have been migrated. We cannot import
- // the legacy super_key key now, because we need to reencrypt
- // it which we cannot do if we are not unlocked, which we are
- // not because otherwise the key would have been migrated.
- // We can check though if the key exists. If it does,
- // we can return Locked. Otherwise, we can delete the
- // key and return NotFound, because the key will never
- // be unlocked again.
- if self.legacy_loader.has_super_key(user_id) {
- return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
- "In check_and_migrate: Cannot migrate super key of this ",
- "key while user is locked."
- ));
- } else {
- self.legacy_loader.remove_keystore_entry(uid, &alias).context(
- concat!(
- "In check_and_migrate: ",
- "Trying to remove obsolete key."
- ),
- )?;
- return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate: Obsolete key.");
- }
- }
- };
+ let super_key_id = self
+ .get_super_key_id_check_unlockable_or_delete(uid, &alias)
+ .context("In check_and_import: Failed to get super key id.")?;
let mut blob_metadata = BlobMetaData::new();
blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
@@ -505,74 +663,79 @@ impl LegacyMigratorState {
BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
_ => {
return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate: Legacy key has unexpected type.")
+ .context("In check_and_import: Legacy key has unexpected type.")
}
};
let km_uuid = self
.get_km_uuid(is_strongbox)
- .context("In check_and_migrate: Trying to get KM UUID")?;
+ .context("In check_and_import: Trying to get KM UUID")?;
blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
let mut metadata = KeyMetaData::new();
let creation_date = DateTime::now()
- .context("In check_and_migrate: Trying to make creation time.")?;
+ .context("In check_and_import: Trying to make creation time.")?;
metadata.add(KeyMetaEntry::CreationDate(creation_date));
+ let blob_info = BlobInfo::new_with_superseded(
+ &blob,
+ &blob_metadata,
+ superseded_blob.as_ref().map(|(b, m)| (&**b, m)),
+ );
// Store legacy key in the database.
self.db
.store_new_key(
&key,
KeyType::Client,
&params,
- &(&blob, &blob_metadata),
+ &blob_info,
&CertificateInfo::new(user_cert, ca_cert),
&metadata,
&km_uuid,
)
- .context("In check_and_migrate.")?;
+ .context("In check_and_import.")?;
Ok(())
}
None => {
if let Some(ca_cert) = ca_cert {
self.db
.store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
- .context("In check_and_migrate: Failed to insert new certificate.")?;
+ .context("In check_and_import: Failed to insert new certificate.")?;
Ok(())
} else {
Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate: Legacy key not found.")
+ .context("In check_and_import: Legacy key not found.")
}
}
};
match result {
Ok(()) => {
- // Add the key to the migrated_keys list.
- self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
+ // Add the key to the imported_keys list.
+ self.recently_imported.insert(RecentImport::new(uid, alias.clone()));
// Delete legacy key from the file system
self.legacy_loader
.remove_keystore_entry(uid, &alias)
- .context("In check_and_migrate: Trying to remove migrated key.")?;
+ .context("In check_and_import: Trying to remove imported key.")?;
Ok(())
}
Err(e) => Err(e),
}
}
- fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
- if self.recently_migrated_super_key.contains(&user_id) {
+ fn check_and_import_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
+ if self.recently_imported_super_key.contains(&user_id) {
return Ok(());
}
if let Some(super_key) = self
.legacy_loader
.load_super_key(user_id, &pw)
- .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
+ .context("In check_and_import_super_key: Trying to load legacy super key.")?
{
let (blob, blob_metadata) =
crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
- .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;
+ .context("In check_and_import_super_key: Trying to encrypt super key.")?;
self.db
.store_super_key(
@@ -583,20 +746,20 @@ impl LegacyMigratorState {
&KeyMetaData::new(),
)
.context(concat!(
- "In check_and_migrate_super_key: ",
+ "In check_and_import_super_key: ",
"Trying to insert legacy super_key into the database."
))?;
self.legacy_loader.remove_super_key(user_id);
- self.recently_migrated_super_key.insert(user_id);
+ self.recently_imported_super_key.insert(user_id);
Ok(())
} else {
Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
- .context("In check_and_migrate_super_key: No key found do migrate.")
+ .context("In check_and_import_super_key: No key found to import.")
}
}
- /// Key migrator request to be run by do_serialized.
- /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
+ /// Key importer request to be run by do_serialized.
+ /// See LegacyImporter::bulk_delete_uid and LegacyImporter::bulk_delete_user.
fn bulk_delete(
&mut self,
bulk_delete_request: BulkDeleteRequest,
@@ -635,13 +798,17 @@ impl LegacyMigratorState {
{
let (km_blob_params, _, _) = self
.legacy_loader
- .load_by_uid_alias(uid, &alias, None)
+ .load_by_uid_alias(uid, &alias, &None)
.context("In bulk_delete: Trying to load legacy blob.")?;
// Determine if the key needs special handling to be deleted.
let (need_gc, is_super_encrypted) = km_blob_params
.as_ref()
.map(|(blob, params)| {
+ let params = match params {
+ LegacyKeyCharacteristics::Cache(params)
+ | LegacyKeyCharacteristics::File(params) => params,
+ };
(
params.iter().any(|kp| {
KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
@@ -695,37 +862,98 @@ impl LegacyMigratorState {
self.legacy_loader
.remove_keystore_entry(uid, &alias)
- .context("In bulk_delete: Trying to remove migrated key.")?;
+ .context("In bulk_delete: Trying to remove imported key.")?;
}
Ok(())
}
fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
- Ok(self.recently_migrated_super_key.contains(&user_id)
+ Ok(self.recently_imported_super_key.contains(&user_id)
|| self.legacy_loader.has_super_key(user_id))
}
fn check_empty(&self) -> u8 {
if self.legacy_loader.is_empty().unwrap_or(false) {
- LegacyMigrator::STATE_EMPTY
+ LegacyImporter::STATE_EMPTY
} else {
- LegacyMigrator::STATE_READY
+ LegacyImporter::STATE_READY
}
}
}
-enum LegacyBlob {
+enum LegacyBlob<'a> {
Vec(Vec<u8>),
ZVec(ZVec),
+ Ref(&'a [u8]),
}
-impl Deref for LegacyBlob {
+impl Deref for LegacyBlob<'_> {
type Target = [u8];
fn deref(&self) -> &Self::Target {
match self {
Self::Vec(v) => &v,
Self::ZVec(v) => &v,
+ Self::Ref(v) => v,
}
}
}
+
+/// This function takes two KeyParameter lists. The first is assumed to have been retrieved from the
+/// KM back end using km_dev.getKeyCharacteristics. The second is assumed to have been retrieved
+/// from a legacy key characteristics file (not cache) as used in Android P and older. The function
+/// augments the former with entries from the latter only if no equivalent entry is present, ignoring
+/// the security level of enforcement. All entries in the latter are assumed to have security level
+/// KEYSTORE.
+fn augment_legacy_characteristics_file_with_key_characteristics<T>(
+ mut from_km: Vec<KeyParameter>,
+ legacy: T,
+) -> Vec<KeyParameter>
+where
+ T: IntoIterator<Item = KeyParameter>,
+{
+ for legacy_kp in legacy.into_iter() {
+ if !from_km
+ .iter()
+ .any(|km_kp| km_kp.key_parameter_value() == legacy_kp.key_parameter_value())
+ {
+ from_km.push(legacy_kp);
+ }
+ }
+ from_km
+}
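// Illustrative sketch (not part of this change): what the merge rule above
// does with concrete values. A KeySize(256) entry reported by KeyMint
// suppresses the KEYSTORE-level duplicate from the legacy file, while the
// file-only UserID(0) entry is appended unchanged.
fn merge_example() -> Vec<KeyParameter> {
    let from_km = vec![KeyParameter::new(
        KeyParameterValue::KeySize(256),
        SecurityLevel::TRUSTED_ENVIRONMENT,
    )];
    let from_file = vec![
        // Same value at a different security level: skipped by the merge.
        KeyParameter::new(KeyParameterValue::KeySize(256), SecurityLevel::KEYSTORE),
        // No equivalent entry in the KM list: appended as-is.
        KeyParameter::new(KeyParameterValue::UserID(0), SecurityLevel::KEYSTORE),
    ];
    // Result: KeySize(256) at TRUSTED_ENVIRONMENT followed by UserID(0).
    augment_legacy_characteristics_file_with_key_characteristics(from_km, from_file)
}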
+
+/// Attempts to retrieve the key characteristics for the given blob from the KM back end with the
+/// given UUID. It may upgrade the key blob in the process. In that case the upgraded blob is
+/// returned as the second tuple member.
+fn get_key_characteristics_without_app_data(
+ uuid: &Uuid,
+ blob: &[u8],
+) -> Result<(Vec<KeyParameter>, Option<Vec<u8>>)> {
+ let (km_dev, _) = crate::globals::get_keymint_dev_by_uuid(uuid).with_context(|| {
+ format!(
+ "In get_key_characteristics_without_app_data: Trying to get km device for id {:?}",
+ uuid
+ )
+ })?;
+
+ let km_dev: Strong<dyn IKeyMintDevice> = km_dev
+ .get_interface()
+ .context("In get_key_characteristics_without_app_data: Failed to get keymint device.")?;
+
+ let (characteristics, upgraded_blob) = upgrade_keyblob_if_required_with(
+ &*km_dev,
+ blob,
+ &[],
+ |blob| {
+ let _wd = wd::watch_millis(
+ "In get_key_characteristics_without_app_data: Calling GetKeyCharacteristics.",
+ 500,
+ );
+ map_km_error(km_dev.getKeyCharacteristics(blob, &[], &[]))
+ },
+ |_| Ok(()),
+ )
+ .context("In get_key_characteristics_without_app_data: Trying to upgrade key blob.")?;
+ Ok((key_characteristics_to_internal(characteristics), upgraded_blob))
+}
diff --git a/keystore2/src/lib.rs b/keystore2/src/lib.rs
index 8b629b10..66763e9f 100644
--- a/keystore2/src/lib.rs
+++ b/keystore2/src/lib.rs
@@ -29,7 +29,7 @@ pub mod id_rotation;
/// Internal Representation of Key Parameter and convenience functions.
pub mod key_parameter;
pub mod legacy_blob;
-pub mod legacy_migrator;
+pub mod legacy_importer;
pub mod maintenance;
pub mod metrics;
pub mod metrics_store;
diff --git a/keystore2/src/maintenance.rs b/keystore2/src/maintenance.rs
index 3180e5df..ced89868 100644
--- a/keystore2/src/maintenance.rs
+++ b/keystore2/src/maintenance.rs
@@ -19,10 +19,12 @@ use crate::error::map_km_error;
use crate::error::map_or_log_err;
use crate::error::Error;
use crate::globals::get_keymint_device;
-use crate::globals::{DB, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, LEGACY_IMPORTER, SUPER_KEY};
use crate::permission::{KeyPerm, KeystorePerm};
use crate::super_key::UserState;
-use crate::utils::{check_key_permission, check_keystore_permission, watchdog as wd};
+use crate::utils::{
+ check_key_permission, check_keystore_permission, uid_to_android_user, watchdog as wd,
+};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::IKeyMintDevice::IKeyMintDevice;
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
use android_security_maintenance::aidl::android::security::maintenance::{
@@ -82,7 +84,7 @@ impl Maintenance {
.with(|db| {
UserState::get_with_password_changed(
&mut db.borrow_mut(),
- &LEGACY_MIGRATOR,
+ &LEGACY_IMPORTER,
&SUPER_KEY,
user_id as u32,
password.as_ref(),
@@ -110,7 +112,7 @@ impl Maintenance {
UserState::reset_user(
&mut db.borrow_mut(),
&SUPER_KEY,
- &LEGACY_MIGRATOR,
+ &LEGACY_IMPORTER,
user_id as u32,
false,
)
@@ -125,7 +127,7 @@ impl Maintenance {
// Permission check. Must return on error. Do not touch the '?'.
check_keystore_permission(KeystorePerm::clear_uid()).context("In clear_namespace.")?;
- LEGACY_MIGRATOR
+ LEGACY_IMPORTER
.bulk_delete_uid(domain, nspace)
.context("In clear_namespace: Trying to delete legacy keys.")?;
DB.with(|db| db.borrow_mut().unbind_keys_for_namespace(domain, nspace))
@@ -141,7 +143,7 @@ impl Maintenance {
check_keystore_permission(KeystorePerm::get_state()).context("In get_state.")?;
let state = DB
.with(|db| {
- UserState::get(&mut db.borrow_mut(), &LEGACY_MIGRATOR, &SUPER_KEY, user_id as u32)
+ UserState::get(&mut db.borrow_mut(), &LEGACY_IMPORTER, &SUPER_KEY, user_id as u32)
})
.context("In get_state. Trying to get UserState.")?;
@@ -219,11 +221,13 @@ impl Maintenance {
fn migrate_key_namespace(source: &KeyDescriptor, destination: &KeyDescriptor) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
+ let super_key = SUPER_KEY.get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
DB.with(|db| {
let key_id_guard = match source.domain {
Domain::APP | Domain::SELINUX | Domain::KEY_ID => {
- let (key_id_guard, _) = LEGACY_MIGRATOR
- .with_try_migrate(&source, caller_uid, || {
+ let (key_id_guard, _) = LEGACY_IMPORTER
+ .with_try_import(&source, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
&source,
KeyType::Client,
diff --git a/keystore2/src/raw_device.rs b/keystore2/src/raw_device.rs
index cd549151..a883987b 100644
--- a/keystore2/src/raw_device.rs
+++ b/keystore2/src/raw_device.rs
@@ -16,8 +16,9 @@
use crate::{
database::{
- BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, KeyEntry, KeyEntryLoadBits,
- KeyIdGuard, KeyMetaData, KeyMetaEntry, KeyType, KeystoreDB, SubComponentType, Uuid,
+ BlobInfo, BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, KeyEntry,
+ KeyEntryLoadBits, KeyIdGuard, KeyMetaData, KeyMetaEntry, KeyType, KeystoreDB,
+ SubComponentType, Uuid,
},
error::{map_km_error, Error, ErrorCode},
globals::get_keymint_device,
@@ -123,7 +124,7 @@ impl KeyMintDevice {
&key_desc,
key_type,
&key_parameters,
- &(&creation_result.keyBlob, &blob_metadata),
+ &BlobInfo::new(&creation_result.keyBlob, &blob_metadata),
&CertificateInfo::new(None, None),
&key_metadata,
&self.km_uuid,
diff --git a/keystore2/src/remote_provisioning.rs b/keystore2/src/remote_provisioning.rs
index 212bf399..369be7c0 100644
--- a/keystore2/src/remote_provisioning.rs
+++ b/keystore2/src/remote_provisioning.rs
@@ -40,7 +40,7 @@ use anyhow::{Context, Result};
use keystore2_crypto::parse_subject_from_certificate;
use std::sync::atomic::{AtomicBool, Ordering};
-use crate::database::{CertificateChain, KeystoreDB, Uuid};
+use crate::database::{CertificateChain, KeyIdGuard, KeystoreDB, Uuid};
use crate::error::{self, map_or_log_err, map_rem_prov_error, Error};
use crate::globals::{get_keymint_device, get_remotely_provisioned_component, DB};
use crate::metrics_store::log_rkp_error_stats;
@@ -62,6 +62,11 @@ impl RemProvState {
Self { security_level, km_uuid, is_hal_present: AtomicBool::new(true) }
}
+ /// Returns the uuid for the KM instance attached to this RemProvState struct.
+ pub fn get_uuid(&self) -> Uuid {
+ self.km_uuid
+ }
+
/// Checks if remote provisioning is enabled and partially caches the result. On a hybrid system
/// remote provisioning can flip from being disabled to enabled depending on responses from the
/// server, so unfortunately caching the presence or absence of the HAL is not enough to fully
@@ -92,7 +97,7 @@ impl RemProvState {
key: &KeyDescriptor,
caller_uid: u32,
db: &mut KeystoreDB,
- ) -> Result<Option<CertificateChain>> {
+ ) -> Result<Option<(KeyIdGuard, CertificateChain)>> {
match key.domain {
Domain::APP => {
// Attempt to get an Attestation Key once. If it fails, then the app doesn't
@@ -117,7 +122,7 @@ impl RemProvState {
"key and failed silently. Something is very wrong."
))
},
- |cert_chain| Ok(Some(cert_chain)),
+ |(guard, cert_chain)| Ok(Some((guard, cert_chain))),
)
}
_ => Ok(None),
@@ -130,12 +135,12 @@ impl RemProvState {
key: &KeyDescriptor,
caller_uid: u32,
db: &mut KeystoreDB,
- ) -> Result<Option<CertificateChain>> {
+ ) -> Result<Option<(KeyIdGuard, CertificateChain)>> {
let cert_chain = db
.retrieve_attestation_key_and_cert_chain(key.domain, caller_uid as i64, &self.km_uuid)
.context("In get_rem_prov_attest_key_helper: Failed to retrieve a key + cert chain")?;
match cert_chain {
- Some(cert_chain) => Ok(Some(cert_chain)),
+ Some((guard, cert_chain)) => Ok(Some((guard, cert_chain))),
// Either this app needs to be assigned a key, or the pool is empty. An error will
// be thrown if there is no key available to assign. This will indicate that the app
// should be nudged to provision more keys so keystore can retry.
@@ -174,7 +179,7 @@ impl RemProvState {
caller_uid: u32,
params: &[KeyParameter],
db: &mut KeystoreDB,
- ) -> Result<Option<(AttestationKey, Certificate)>> {
+ ) -> Result<Option<(KeyIdGuard, AttestationKey, Certificate)>> {
if !self.is_asymmetric_key(params) || !self.check_rem_prov_enabled(db)? {
// There is no remote provisioning component for this security level on the
// device. Return None so the underlying KM instance knows to use its
@@ -195,7 +200,8 @@ impl RemProvState {
Ok(None)
}
Ok(v) => match v {
- Some(cert_chain) => Ok(Some((
+ Some((guard, cert_chain)) => Ok(Some((
+ guard,
AttestationKey {
keyBlob: cert_chain.private_key.to_vec(),
attestKeyParams: vec![],
diff --git a/keystore2/src/security_level.rs b/keystore2/src/security_level.rs
index 1b2e3485..76110b3f 100644
--- a/keystore2/src/security_level.rs
+++ b/keystore2/src/security_level.rs
@@ -18,9 +18,9 @@ use crate::attestation_key_utils::{get_attest_key_info, AttestationKeyInfo};
use crate::audit_log::{
log_key_deleted, log_key_generated, log_key_imported, log_key_integrity_violation,
};
-use crate::database::{CertificateInfo, KeyIdGuard};
+use crate::database::{BlobInfo, CertificateInfo, KeyIdGuard};
use crate::error::{self, map_km_error, map_or_log_err, Error, ErrorCode};
-use crate::globals::{DB, ENFORCEMENTS, LEGACY_MIGRATOR, SUPER_KEY};
+use crate::globals::{DB, ENFORCEMENTS, LEGACY_IMPORTER, SUPER_KEY};
use crate::key_parameter::KeyParameter as KsKeyParam;
use crate::key_parameter::KeyParameterValue as KsKeyParamValue;
use crate::metrics_store::log_key_creation_event_stats;
@@ -162,7 +162,7 @@ impl KeystoreSecurityLevel {
let (key_blob, mut blob_metadata) = SUPER_KEY
.handle_super_encryption_on_key_init(
&mut db,
- &LEGACY_MIGRATOR,
+ &LEGACY_IMPORTER,
&(key.domain),
&key_parameters,
flags,
@@ -180,7 +180,7 @@ impl KeystoreSecurityLevel {
&key,
KeyType::Client,
&key_parameters,
- &(&key_blob, &blob_metadata),
+ &BlobInfo::new(&key_blob, &blob_metadata),
&cert_info,
&key_metadata,
&self.km_uuid,
@@ -241,9 +241,11 @@ impl KeystoreSecurityLevel {
)
}
_ => {
+ let super_key =
+ SUPER_KEY.get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
let (key_id_guard, mut key_entry) = DB
.with::<_, Result<(KeyIdGuard, KeyEntry)>>(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
&key,
KeyType::Client,
@@ -316,7 +318,7 @@ impl KeystoreSecurityLevel {
&*km_dev,
key_id_guard,
&km_blob,
- &blob_metadata,
+ blob_metadata.km_uuid().copied(),
&operation_parameters,
|blob| loop {
match map_km_error({
@@ -548,7 +550,7 @@ impl KeystoreSecurityLevel {
&*km_dev,
Some(key_id_guard),
&KeyBlob::Ref(&blob),
- &blob_metadata,
+ blob_metadata.km_uuid().copied(),
&params,
|blob| {
let attest_key = Some(AttestationKey {
@@ -570,23 +572,40 @@ impl KeystoreSecurityLevel {
)
.context("In generate_key: Using user generated attestation key.")
.map(|(result, _)| result),
- Some(AttestationKeyInfo::RemoteProvisioned { attestation_key, attestation_certs }) => {
- map_km_error({
- let _wp = self.watch_millis(
- concat!(
- "In KeystoreSecurityLevel::generate_key (RemoteProvisioned): ",
- "calling generate_key.",
- ),
- 5000, // Generate can take a little longer.
- );
- km_dev.generateKey(&params, Some(&attestation_key))
- })
+ Some(AttestationKeyInfo::RemoteProvisioned {
+ key_id_guard,
+ attestation_key,
+ attestation_certs,
+ }) => self
+ .upgrade_keyblob_if_required_with(
+ &*km_dev,
+ Some(key_id_guard),
+ &KeyBlob::Ref(&attestation_key.keyBlob),
+ Some(self.rem_prov_state.get_uuid()),
+ &[],
+ |blob| {
+ map_km_error({
+ let _wp = self.watch_millis(
+ concat!(
+ "In KeystoreSecurityLevel::generate_key (RemoteProvisioned): ",
+ "calling generate_key.",
+ ),
+ 5000, // Generate can take a little longer.
+ );
+ let dynamic_attest_key = Some(AttestationKey {
+ keyBlob: blob.to_vec(),
+ attestKeyParams: vec![],
+ issuerSubjectName: attestation_key.issuerSubjectName.clone(),
+ });
+ km_dev.generateKey(&params, dynamic_attest_key.as_ref())
+ })
+ },
+ )
.context("While generating Key with remote provisioned attestation key.")
- .map(|mut creation_result| {
- creation_result.certificateChain.push(attestation_certs);
- creation_result
- })
- }
+ .map(|(mut result, _)| {
+ result.certificateChain.push(attestation_certs);
+ result
+ }),
None => map_km_error({
let _wp = self.watch_millis(
concat!(
@@ -717,9 +736,11 @@ impl KeystoreSecurityLevel {
// Import_wrapped_key requires the rebind permission for the new key.
check_key_permission(KeyPerm::rebind(), &key, &None).context("In import_wrapped_key.")?;
+ let super_key = SUPER_KEY.get_per_boot_key_by_user_id(user_id);
+
let (wrapping_key_id_guard, mut wrapping_key_entry) = DB
.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(&key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
&wrapping_key,
KeyType::Client,
@@ -769,7 +790,7 @@ impl KeystoreSecurityLevel {
&*km_dev,
Some(wrapping_key_id_guard),
&wrapping_key_blob,
- &wrapping_blob_metadata,
+ wrapping_blob_metadata.km_uuid().copied(),
&[],
|wrapping_blob| {
let _wp = self.watch_millis(
@@ -795,7 +816,7 @@ impl KeystoreSecurityLevel {
fn store_upgraded_keyblob(
key_id_guard: KeyIdGuard,
- km_uuid: Option<&Uuid>,
+ km_uuid: Option<Uuid>,
key_blob: &KeyBlob,
upgraded_blob: &[u8],
) -> Result<()> {
@@ -805,7 +826,7 @@ impl KeystoreSecurityLevel {
let mut new_blob_metadata = new_blob_metadata.unwrap_or_default();
if let Some(uuid) = km_uuid {
- new_blob_metadata.add(BlobMetaEntry::KmUuid(*uuid));
+ new_blob_metadata.add(BlobMetaEntry::KmUuid(uuid));
}
DB.with(|db| {
@@ -823,69 +844,46 @@ impl KeystoreSecurityLevel {
fn upgrade_keyblob_if_required_with<T, F>(
&self,
km_dev: &dyn IKeyMintDevice,
- key_id_guard: Option<KeyIdGuard>,
+ mut key_id_guard: Option<KeyIdGuard>,
key_blob: &KeyBlob,
- blob_metadata: &BlobMetaData,
+ km_uuid: Option<Uuid>,
params: &[KeyParameter],
f: F,
) -> Result<(T, Option<Vec<u8>>)>
where
F: Fn(&[u8]) -> Result<T, Error>,
{
- match f(key_blob) {
- Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
- let upgraded_blob = {
- let _wp = self.watch_millis(
- concat!(
- "In KeystoreSecurityLevel::upgrade_keyblob_if_required_with: ",
- "calling upgradeKey."
- ),
- 500,
- );
- map_km_error(km_dev.upgradeKey(key_blob, params))
- }
- .context("In upgrade_keyblob_if_required_with: Upgrade failed.")?;
-
- if let Some(kid) = key_id_guard {
- Self::store_upgraded_keyblob(
- kid,
- blob_metadata.km_uuid(),
- key_blob,
- &upgraded_blob,
- )
- .context(
+ let (v, upgraded_blob) = crate::utils::upgrade_keyblob_if_required_with(
+ km_dev,
+ key_blob,
+ params,
+ f,
+ |upgraded_blob| {
+ if key_id_guard.is_some() {
+ // Unwrap cannot panic, because is_some() was true above.
+ let kid = key_id_guard.take().unwrap();
+ Self::store_upgraded_keyblob(kid, km_uuid, key_blob, upgraded_blob).context(
"In upgrade_keyblob_if_required_with: store_upgraded_keyblob failed",
- )?;
- }
-
- match f(&upgraded_blob) {
- Ok(v) => Ok((v, Some(upgraded_blob))),
- Err(e) => Err(e).context(concat!(
- "In upgrade_keyblob_if_required_with: ",
- "Failed to perform operation on second try."
- )),
- }
- }
- result => {
- if let Some(kid) = key_id_guard {
- if key_blob.force_reencrypt() {
- Self::store_upgraded_keyblob(
- kid,
- blob_metadata.km_uuid(),
- key_blob,
- key_blob,
- )
- .context(concat!(
- "In upgrade_keyblob_if_required_with: ",
- "store_upgraded_keyblob failed in forced reencrypt"
- ))?;
- }
+ )
+ } else {
+ Ok(())
}
- result
- .map(|v| (v, None))
- .context("In upgrade_keyblob_if_required_with: Called closure failed.")
+ },
+ )
+ .context("In KeystoreSecurityLevel::upgrade_keyblob_if_required_with.")?;
+
+        // If no upgrade was needed, use the opportunity to reencrypt the blob if required
+        // and if a key_id_guard is held. Note: key_id_guard can only be Some if no
+        // upgrade was performed above and if one was given in the first place.
+ if key_blob.force_reencrypt() {
+ if let Some(kid) = key_id_guard {
+ Self::store_upgraded_keyblob(kid, km_uuid, key_blob, key_blob).context(concat!(
+ "In upgrade_keyblob_if_required_with: ",
+ "store_upgraded_keyblob failed in forced reencrypt"
+ ))?;
}
}
+ Ok((v, upgraded_blob))
}
fn convert_storage_key_to_ephemeral(
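
The refactor above keeps `key_id_guard` usable after the shared helper returns by letting the `FnOnce` callback take it out of an `Option`. A minimal, self-contained sketch of that pattern follows; `Guard`, `run_with_callback`, and `demo` are hypothetical stand-ins, not keystore2 code.

// Hedged sketch, hypothetical types: move a guard out of an Option inside a
// FnOnce callback so it is consumed at most once and stays available otherwise.
struct Guard(u32);

fn run_with_callback(needs_callback: bool, cb: impl FnOnce()) {
    if needs_callback {
        cb();
    }
}

fn demo(needs_callback: bool) {
    let mut guard = Some(Guard(42));
    run_with_callback(needs_callback, || {
        // Unwrap cannot panic: we only get here while the Option is still Some.
        let g = guard.take().unwrap();
        println!("callback consumed guard {}", g.0);
    });
    // If the callback never ran (no upgrade was needed), the guard is still here,
    // analogous to the forced-reencrypt branch above.
    if let Some(g) = guard {
        println!("guard {} still available after the call", g.0);
    }
}

fn main() {
    demo(true);
    demo(false);
}
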
diff --git a/keystore2/src/service.rs b/keystore2/src/service.rs
index d65743d2..646e7b13 100644
--- a/keystore2/src/service.rs
+++ b/keystore2/src/service.rs
@@ -22,11 +22,11 @@ use crate::permission::{KeyPerm, KeystorePerm};
use crate::security_level::KeystoreSecurityLevel;
use crate::utils::{
check_grant_permission, check_key_permission, check_keystore_permission,
- key_parameters_to_authorizations, watchdog as wd, Asp,
+ key_parameters_to_authorizations, uid_to_android_user, watchdog as wd, Asp,
};
use crate::{
database::Uuid,
- globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_MIGRATOR},
+ globals::{create_thread_local_db, DB, LEGACY_BLOB_LOADER, LEGACY_IMPORTER, SUPER_KEY},
};
use crate::{database::KEYSTORE_UUID, permission};
use crate::{
@@ -83,12 +83,12 @@ impl KeystoreService {
}
let uuid_by_sec_level = result.uuid_by_sec_level.clone();
- LEGACY_MIGRATOR
+ LEGACY_IMPORTER
.set_init(move || {
(create_thread_local_db(), uuid_by_sec_level, LEGACY_BLOB_LOADER.clone())
})
.context(
- "In KeystoreService::new_native_binder: Trying to initialize the legacy migrator.",
+ "In KeystoreService::new_native_binder: Trying to initialize the legacy importer.",
)?;
Ok(BnKeystoreService::new_binder(
@@ -132,9 +132,12 @@ impl KeystoreService {
fn get_key_entry(&self, key: &KeyDescriptor) -> Result<KeyEntryResponse> {
let caller_uid = ThreadState::get_calling_uid();
+
+ let super_key = SUPER_KEY.get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
let (key_id_guard, mut key_entry) = DB
.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(&key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
&key,
KeyType::Client,
@@ -184,8 +187,10 @@ impl KeystoreService {
certificate_chain: Option<&[u8]>,
) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
+ let super_key = SUPER_KEY.get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
DB.with::<_, Result<()>>(|db| {
- let entry = match LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ let entry = match LEGACY_IMPORTER.with_try_import(&key, caller_uid, super_key, || {
db.borrow_mut().load_key_entry(
&key,
KeyType::Client,
@@ -288,7 +293,7 @@ impl KeystoreService {
Ok(()) => {}
};
- let mut result = LEGACY_MIGRATOR
+ let mut result = LEGACY_IMPORTER
.list_uid(k.domain, k.nspace)
.context("In list_entries: Trying to list legacy keys.")?;
@@ -308,8 +313,10 @@ impl KeystoreService {
fn delete_key(&self, key: &KeyDescriptor) -> Result<()> {
let caller_uid = ThreadState::get_calling_uid();
+ let super_key = SUPER_KEY.get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(&key, caller_uid, super_key, || {
db.borrow_mut().unbind_key(&key, KeyType::Client, caller_uid, |k, av| {
check_key_permission(KeyPerm::delete(), k, &av).context("During delete_key.")
})
@@ -326,8 +333,10 @@ impl KeystoreService {
access_vector: permission::KeyPermSet,
) -> Result<KeyDescriptor> {
let caller_uid = ThreadState::get_calling_uid();
+ let super_key = SUPER_KEY.get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
+
DB.with(|db| {
- LEGACY_MIGRATOR.with_try_migrate(&key, caller_uid, || {
+ LEGACY_IMPORTER.with_try_import(key, caller_uid, super_key, || {
db.borrow_mut().grant(
&key,
caller_uid,
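
Each service entry point above now follows the same preamble: map the calling uid to its Android user, fetch that user's per-boot super key, and hand it to the legacy import closure. The following is a rough, self-contained sketch of that caller-side pattern only; the types and functions are stand-ins for SUPER_KEY, LEGACY_IMPORTER, and keystore2::utils, not the real API.

// Hedged sketch, stand-in types only.
use std::sync::Arc;

trait AesGcm {}                      // stand-in for keystore2::utils::AesGcm
struct FakeSuperKey;
impl AesGcm for FakeSuperKey {}

const PER_USER_RANGE: u32 = 100_000; // Android partitions uids per user

fn uid_to_android_user(uid: u32) -> u32 {
    uid / PER_USER_RANGE
}

fn get_per_boot_key_by_user_id(user_id: u32) -> Option<Arc<dyn AesGcm + Send + Sync>> {
    // The real SuperKeyManager consults its per-user cache; here user 0 is "unlocked".
    (user_id == 0).then(|| Arc::new(FakeSuperKey) as Arc<dyn AesGcm + Send + Sync>)
}

fn with_try_import<T>(
    _super_key: Option<Arc<dyn AesGcm + Send + Sync>>,
    key_loader: impl FnOnce() -> T,
) -> T {
    // The real importer would first try to import a matching legacy blob,
    // super-encrypting it with the given key, and then run the loader.
    key_loader()
}

fn main() {
    let caller_uid = 10_123u32; // an app uid in user 0
    let super_key = get_per_boot_key_by_user_id(uid_to_android_user(caller_uid));
    let entry = with_try_import(super_key, || "key entry loaded from DB");
    println!("{}", entry);
}
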
diff --git a/keystore2/src/super_key.rs b/keystore2/src/super_key.rs
index f7e0375e..d2613215 100644
--- a/keystore2/src/super_key.rs
+++ b/keystore2/src/super_key.rs
@@ -26,11 +26,10 @@ use crate::{
error::ResponseCode,
key_parameter::{KeyParameter, KeyParameterValue},
legacy_blob::LegacyBlobLoader,
- legacy_migrator::LegacyMigrator,
+ legacy_importer::LegacyImporter,
raw_device::KeyMintDevice,
try_insert::TryInsert,
- utils::watchdog as wd,
- utils::AID_KEYSTORE,
+ utils::{watchdog as wd, AesGcm, AID_KEYSTORE},
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
Algorithm::Algorithm, BlockMode::BlockMode, HardwareAuthToken::HardwareAuthToken,
@@ -157,15 +156,22 @@ pub struct SuperKey {
reencrypt_with: Option<Arc<SuperKey>>,
}
-impl SuperKey {
- /// For most purposes `unwrap_key` handles decryption,
- /// but legacy handling and some tests need to assume AES and decrypt directly.
- pub fn aes_gcm_decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
+impl AesGcm for SuperKey {
+ fn decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
if self.algorithm == SuperEncryptionAlgorithm::Aes256Gcm {
aes_gcm_decrypt(data, iv, tag, &self.key)
- .context("In aes_gcm_decrypt: decryption failed")
+ .context("In SuperKey::decrypt: Decryption failed.")
} else {
- Err(Error::sys()).context("In aes_gcm_decrypt: Key is not an AES key")
+ Err(Error::sys()).context("In SuperKey::decrypt: Key is not an AES key.")
+ }
+ }
+
+ fn encrypt(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
+ if self.algorithm == SuperEncryptionAlgorithm::Aes256Gcm {
+ aes_gcm_encrypt(plaintext, &self.key)
+ .context("In SuperKey::encrypt: Encryption failed.")
+ } else {
+ Err(Error::sys()).context("In SuperKey::encrypt: Key is not an AES key.")
}
}
}
@@ -378,7 +384,15 @@ impl SuperKeyManager {
})
}
- pub fn get_per_boot_key_by_user_id(&self, user_id: UserId) -> Option<Arc<SuperKey>> {
+ pub fn get_per_boot_key_by_user_id(
+ &self,
+ user_id: UserId,
+ ) -> Option<Arc<dyn AesGcm + Send + Sync>> {
+ self.get_per_boot_key_by_user_id_internal(user_id)
+ .map(|sk| -> Arc<dyn AesGcm + Send + Sync> { sk })
+ }
+
+ fn get_per_boot_key_by_user_id_internal(&self, user_id: UserId) -> Option<Arc<SuperKey>> {
let data = self.data.lock().unwrap();
data.user_keys.get(&user_id).and_then(|e| e.per_boot.as_ref().cloned())
}
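
The public getter now hands out the super key only as a trait object, while the internal variant keeps returning the concrete type. A hedged sketch of the same public/internal split, using hypothetical stand-in types rather than SuperKey itself:

// Hedged sketch, hypothetical types.
use std::sync::Arc;

trait Cipher {
    fn seal(&self, plaintext: &[u8]) -> Vec<u8>;
}

struct PerBootKey {
    key: Vec<u8>,
}

impl Cipher for PerBootKey {
    fn seal(&self, plaintext: &[u8]) -> Vec<u8> {
        // Toy stand-in; the real SuperKey would AES-GCM-encrypt with self.key.
        plaintext
            .iter()
            .zip(self.key.iter().cycle())
            .map(|(p, k)| p ^ k)
            .collect()
    }
}

fn get_key_internal() -> Option<Arc<PerBootKey>> {
    Some(Arc::new(PerBootKey { key: vec![0x42; 32] }))
}

// Public accessor: the closure's explicit return type performs the unsizing
// coercion, so callers never see the concrete PerBootKey type.
fn get_key() -> Option<Arc<dyn Cipher + Send + Sync>> {
    get_key_internal().map(|k| -> Arc<dyn Cipher + Send + Sync> { k })
}

fn main() {
    if let Some(k) = get_key() {
        println!("sealed {} bytes", k.seal(b"hello").len());
    }
}
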
@@ -457,7 +471,7 @@ impl SuperKeyManager {
match key.algorithm {
SuperEncryptionAlgorithm::Aes256Gcm => match (metadata.iv(), metadata.aead_tag()) {
(Some(iv), Some(tag)) => key
- .aes_gcm_decrypt(blob, iv, tag)
+ .decrypt(blob, iv, tag)
.context("In unwrap_key_with_key: Failed to decrypt the key blob."),
(iv, tag) => Err(Error::Rc(ResponseCode::VALUE_CORRUPTED)).context(format!(
concat!(
@@ -497,7 +511,7 @@ impl SuperKeyManager {
/// Checks if user has setup LSKF, even when super key cache is empty for the user.
pub fn super_key_exists_in_db_for_user(
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
) -> Result<bool> {
let key_in_db = db
@@ -507,7 +521,7 @@ impl SuperKeyManager {
if key_in_db {
Ok(key_in_db)
} else {
- legacy_migrator
+ legacy_importer
.has_super_key(user_id)
.context("In super_key_exists_in_db_for_user: Trying to query legacy db.")
}
@@ -519,13 +533,13 @@ impl SuperKeyManager {
pub fn check_and_unlock_super_key(
&self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
pw: &Password,
) -> Result<UserState> {
let alias = &USER_SUPER_KEY;
- let result = legacy_migrator
- .with_try_migrate_super_key(user_id, pw, || db.load_super_key(alias, user_id))
+ let result = legacy_importer
+ .with_try_import_super_key(user_id, pw, || db.load_super_key(alias, user_id))
.context("In check_and_unlock_super_key. Failed to load super key")?;
match result {
@@ -548,12 +562,12 @@ impl SuperKeyManager {
pub fn check_and_initialize_super_key(
&self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
pw: Option<&Password>,
) -> Result<UserState> {
let super_key_exists_in_db =
- Self::super_key_exists_in_db_for_user(db, legacy_migrator, user_id).context(
+ Self::super_key_exists_in_db_for_user(db, legacy_importer, user_id).context(
"In check_and_initialize_super_key. Failed to check if super key exists.",
)?;
if super_key_exists_in_db {
@@ -682,11 +696,11 @@ impl SuperKeyManager {
fn super_encrypt_on_key_init(
&self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
key_blob: &[u8],
) -> Result<(Vec<u8>, BlobMetaData)> {
- match UserState::get(db, legacy_migrator, self, user_id)
+ match UserState::get(db, legacy_importer, self, user_id)
.context("In super_encrypt. Failed to get user state.")?
{
UserState::LskfUnlocked(super_key) => {
@@ -727,7 +741,7 @@ impl SuperKeyManager {
pub fn handle_super_encryption_on_key_init(
&self,
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
domain: &Domain,
key_parameters: &[KeyParameter],
flags: Option<i32>,
@@ -737,7 +751,7 @@ impl SuperKeyManager {
match Enforcements::super_encryption_required(domain, key_parameters, flags) {
SuperEncryptionType::None => Ok((key_blob.to_vec(), BlobMetaData::new())),
SuperEncryptionType::LskfBound => self
- .super_encrypt_on_key_init(db, legacy_migrator, user_id, &key_blob)
+ .super_encrypt_on_key_init(db, legacy_importer, user_id, &key_blob)
.context(concat!(
"In handle_super_encryption_on_key_init. ",
"Failed to super encrypt with LskfBound key."
@@ -1077,16 +1091,16 @@ pub enum UserState {
impl UserState {
pub fn get(
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
skm: &SuperKeyManager,
user_id: UserId,
) -> Result<UserState> {
- match skm.get_per_boot_key_by_user_id(user_id) {
+ match skm.get_per_boot_key_by_user_id_internal(user_id) {
Some(super_key) => Ok(UserState::LskfUnlocked(super_key)),
None => {
//Check if a super key exists in the database or legacy database.
//If so, return locked user state.
- if SuperKeyManager::super_key_exists_in_db_for_user(db, legacy_migrator, user_id)
+ if SuperKeyManager::super_key_exists_in_db_for_user(db, legacy_importer, user_id)
.context("In get.")?
{
Ok(UserState::LskfLocked)
@@ -1100,17 +1114,17 @@ impl UserState {
/// Queries user state when serving password change requests.
pub fn get_with_password_changed(
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
skm: &SuperKeyManager,
user_id: UserId,
password: Option<&Password>,
) -> Result<UserState> {
- match skm.get_per_boot_key_by_user_id(user_id) {
+ match skm.get_per_boot_key_by_user_id_internal(user_id) {
Some(super_key) => {
if password.is_none() {
//transitioning to swiping, delete only the super key in database and cache, and
//super-encrypted keys in database (and in KM)
- Self::reset_user(db, skm, legacy_migrator, user_id, true).context(
+ Self::reset_user(db, skm, legacy_importer, user_id, true).context(
"In get_with_password_changed: Trying to delete keys from the db.",
)?;
//Lskf is now removed in Keystore
@@ -1126,7 +1140,7 @@ impl UserState {
//If so, return LskfLocked state.
//Otherwise, i) if the password is provided, initialize the super key and return
//LskfUnlocked state ii) if password is not provided, return Uninitialized state.
- skm.check_and_initialize_super_key(db, legacy_migrator, user_id, password)
+ skm.check_and_initialize_super_key(db, legacy_importer, user_id, password)
}
}
}
@@ -1134,12 +1148,12 @@ impl UserState {
/// Queries user state when serving password unlock requests.
pub fn get_with_password_unlock(
db: &mut KeystoreDB,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
skm: &SuperKeyManager,
user_id: UserId,
password: &Password,
) -> Result<UserState> {
- match skm.get_per_boot_key_by_user_id(user_id) {
+ match skm.get_per_boot_key_by_user_id_internal(user_id) {
Some(super_key) => {
log::info!("In get_with_password_unlock. Trying to unlock when already unlocked.");
Ok(UserState::LskfUnlocked(super_key))
@@ -1149,7 +1163,7 @@ impl UserState {
//If not, return Uninitialized state.
//Otherwise, try to unlock the super key and if successful,
//return LskfUnlocked state
- skm.check_and_unlock_super_key(db, legacy_migrator, user_id, password)
+ skm.check_and_unlock_super_key(db, legacy_importer, user_id, password)
.context("In get_with_password_unlock. Failed to unlock super key.")
}
}
@@ -1161,12 +1175,12 @@ impl UserState {
pub fn reset_user(
db: &mut KeystoreDB,
skm: &SuperKeyManager,
- legacy_migrator: &LegacyMigrator,
+ legacy_importer: &LegacyImporter,
user_id: UserId,
keep_non_super_encrypted_keys: bool,
) -> Result<()> {
// mark keys created on behalf of the user as unreferenced.
- legacy_migrator
+ legacy_importer
.bulk_delete_user(user_id, keep_non_super_encrypted_keys)
.context("In reset_user: Trying to delete legacy keys.")?;
db.unbind_keys_for_user(user_id, keep_non_super_encrypted_keys)
diff --git a/keystore2/src/utils.rs b/keystore2/src/utils.rs
index a110c64e..5a42b25c 100644
--- a/keystore2/src/utils.rs
+++ b/keystore2/src/utils.rs
@@ -15,11 +15,13 @@
//! This module implements utility functions used by the Keystore 2.0 service
//! implementation.
-use crate::error::{map_binder_status, Error, ErrorCode};
+use crate::error::{map_binder_status, map_km_error, Error, ErrorCode};
+use crate::key_parameter::KeyParameter;
use crate::permission;
use crate::permission::{KeyPerm, KeyPermSet, KeystorePerm};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::{
- KeyCharacteristics::KeyCharacteristics, Tag::Tag,
+ IKeyMintDevice::IKeyMintDevice, KeyCharacteristics::KeyCharacteristics,
+ KeyParameter::KeyParameter as KmKeyParameter, Tag::Tag,
};
use android_os_permissions_aidl::aidl::android::os::IPermissionController;
use android_security_apc::aidl::android::security::apc::{
@@ -29,13 +31,15 @@ use android_security_apc::aidl::android::security::apc::{
use android_system_keystore2::aidl::android::system::keystore2::{
Authorization::Authorization, KeyDescriptor::KeyDescriptor,
};
-use anyhow::{anyhow, Context};
+use anyhow::{anyhow, Context, Result};
use binder::{FromIBinder, SpIBinder, ThreadState};
use keystore2_apc_compat::{
ApcCompatUiOptions, APC_COMPAT_ERROR_ABORTED, APC_COMPAT_ERROR_CANCELLED,
APC_COMPAT_ERROR_IGNORED, APC_COMPAT_ERROR_OK, APC_COMPAT_ERROR_OPERATION_PENDING,
APC_COMPAT_ERROR_SYSTEM_ERROR,
};
+use keystore2_crypto::{aes_gcm_decrypt, aes_gcm_encrypt, ZVec};
+use std::iter::IntoIterator;
use std::sync::Mutex;
/// This function uses its namesake in the permission module and in
@@ -165,18 +169,60 @@ impl Clone for Asp {
/// representation of the keystore service.
pub fn key_characteristics_to_internal(
key_characteristics: Vec<KeyCharacteristics>,
-) -> Vec<crate::key_parameter::KeyParameter> {
+) -> Vec<KeyParameter> {
key_characteristics
.into_iter()
.flat_map(|aidl_key_char| {
let sec_level = aidl_key_char.securityLevel;
- aidl_key_char.authorizations.into_iter().map(move |aidl_kp| {
- crate::key_parameter::KeyParameter::new(aidl_kp.into(), sec_level)
- })
+ aidl_key_char
+ .authorizations
+ .into_iter()
+ .map(move |aidl_kp| KeyParameter::new(aidl_kp.into(), sec_level))
})
.collect()
}
+/// This function can be used to upgrade key blobs on demand. The return value of
+/// `km_op` is inspected, and if ErrorCode::KEY_REQUIRES_UPGRADE is encountered,
+/// an attempt is made to upgrade the key blob. On success, `new_blob_handler` is called
+/// with the upgraded blob as argument, and `km_op` is then called a second time with
+/// the upgraded blob. On success, a tuple of `km_op`'s result and the optional
+/// upgraded blob is returned.
+pub fn upgrade_keyblob_if_required_with<T, KmOp, NewBlobHandler>(
+ km_dev: &dyn IKeyMintDevice,
+ key_blob: &[u8],
+ upgrade_params: &[KmKeyParameter],
+ km_op: KmOp,
+ new_blob_handler: NewBlobHandler,
+) -> Result<(T, Option<Vec<u8>>)>
+where
+ KmOp: Fn(&[u8]) -> Result<T, Error>,
+ NewBlobHandler: FnOnce(&[u8]) -> Result<()>,
+{
+ match km_op(key_blob) {
+ Err(Error::Km(ErrorCode::KEY_REQUIRES_UPGRADE)) => {
+ let upgraded_blob = {
+ let _wp = watchdog::watch_millis(
+ "In utils::upgrade_keyblob_if_required_with: calling upgradeKey.",
+ 500,
+ );
+ map_km_error(km_dev.upgradeKey(key_blob, upgrade_params))
+ }
+ .context("In utils::upgrade_keyblob_if_required_with: Upgrade failed.")?;
+
+ new_blob_handler(&upgraded_blob)
+ .context("In utils::upgrade_keyblob_if_required_with: calling new_blob_handler.")?;
+
+ km_op(&upgraded_blob)
+ .map(|v| (v, Some(upgraded_blob)))
+ .context("In utils::upgrade_keyblob_if_required_with: Calling km_op after upgrade.")
+ }
+ r => r
+ .map(|v| (v, None))
+ .context("In utils::upgrade_keyblob_if_required_with: Calling km_op."),
+ }
+}
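
A minimal, self-contained sketch of the same retry-on-upgrade control flow, using stand-in types (`MockError`, plain closures) instead of `IKeyMintDevice`, `Error`, and `map_km_error`; it is an illustration of the shape of the helper, not the keystore2 implementation.

// Hedged sketch, stand-in types only.
#[derive(Debug)]
enum MockError {
    KeyRequiresUpgrade,
}

fn upgrade_if_required_with<T, Op, OnNewBlob>(
    upgrade: impl Fn(&[u8]) -> Result<Vec<u8>, MockError>,
    key_blob: &[u8],
    op: Op,
    on_new_blob: OnNewBlob,
) -> Result<(T, Option<Vec<u8>>), MockError>
where
    Op: Fn(&[u8]) -> Result<T, MockError>,
    OnNewBlob: FnOnce(&[u8]) -> Result<(), MockError>,
{
    match op(key_blob) {
        Err(MockError::KeyRequiresUpgrade) => {
            let upgraded = upgrade(key_blob)?;
            on_new_blob(&upgraded)?; // e.g. persist the new blob before retrying
            let v = op(&upgraded)?;  // second attempt with the upgraded blob
            Ok((v, Some(upgraded)))
        }
        r => r.map(|v| (v, None)),
    }
}

fn main() -> Result<(), MockError> {
    let old_blob = b"v1:key".to_vec();
    let upgrade = |b: &[u8]| {
        let mut v = b"v2:".to_vec();
        v.extend_from_slice(&b[3..]);
        Ok(v)
    };
    let op = |b: &[u8]| {
        if b.starts_with(b"v2:") {
            Ok(b.len())
        } else {
            Err(MockError::KeyRequiresUpgrade)
        }
    };
    let (result, new_blob) = upgrade_if_required_with(upgrade, &old_blob, op, |b| {
        println!("persisting upgraded blob ({} bytes)", b.len());
        Ok(())
    })?;
    assert!(new_blob.is_some());
    println!("operation result: {}", result);
    Ok(())
}
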
+
/// Converts a set of key characteristics from the internal representation into a set of
/// Authorizations as they are used to convey key characteristics to the clients of keystore.
pub fn key_parameters_to_authorizations(
@@ -264,6 +310,36 @@ pub mod watchdog {
}
}
+/// Trait implemented by objects that can encrypt and decrypt data using AES-GCM.
+pub trait AesGcm {
+    /// Deciphers `data` with AES-GCM, using the initialization vector `iv` and the
+    /// AEAD tag `tag`. The implementation provides the key material and selects the
+    /// variant, e.g., AES128 or AES256.
+    fn decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec>;
+
+    /// Encrypts `plaintext` and returns the ciphertext, the initialization vector `iv`,
+    /// and the AEAD tag `tag`. The implementation provides the key material and selects
+    /// the variant, e.g., AES128 or AES256.
+    fn encrypt(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)>;
+}
+
+/// Marks an object as an AES-GCM key.
+pub trait AesGcmKey {
+ /// Provides access to the raw key material.
+ fn key(&self) -> &[u8];
+}
+
+impl<T: AesGcmKey> AesGcm for T {
+ fn decrypt(&self, data: &[u8], iv: &[u8], tag: &[u8]) -> Result<ZVec> {
+ aes_gcm_decrypt(data, iv, tag, self.key())
+            .context("In AesGcm<T>::decrypt: Decryption failed.")
+ }
+
+ fn encrypt(&self, plaintext: &[u8]) -> Result<(Vec<u8>, Vec<u8>, Vec<u8>)> {
+ aes_gcm_encrypt(plaintext, self.key()).context("In AesGcm<T>::encrypt: Encryption failed.")
+ }
+}
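
A self-contained sketch of the same blanket-impl idea, with hypothetical traits and a toy XOR "cipher" standing in for `aes_gcm_encrypt`/`aes_gcm_decrypt`: a key type only has to expose its raw key material, and the blanket impl supplies the full cipher interface.

// Hedged sketch, hypothetical types; not keystore2_crypto.
trait KeyMaterial {
    fn key(&self) -> &[u8];
}

trait SymmetricCipher {
    fn encrypt(&self, plaintext: &[u8]) -> Vec<u8>;
    fn decrypt(&self, ciphertext: &[u8]) -> Vec<u8>;
}

impl<T: KeyMaterial> SymmetricCipher for T {
    fn encrypt(&self, plaintext: &[u8]) -> Vec<u8> {
        plaintext
            .iter()
            .zip(self.key().iter().cycle())
            .map(|(p, k)| p ^ k)
            .collect()
    }
    fn decrypt(&self, ciphertext: &[u8]) -> Vec<u8> {
        // XOR is its own inverse, which keeps the sketch round-trippable.
        self.encrypt(ciphertext)
    }
}

struct DemoKey([u8; 32]);

impl KeyMaterial for DemoKey {
    fn key(&self) -> &[u8] {
        &self.0
    }
}

fn main() {
    let k = DemoKey([7u8; 32]);
    let ct = k.encrypt(b"secret");
    assert_eq!(k.decrypt(&ct), b"secret".to_vec());
    println!("round trip ok");
}
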
+
/// This module provides empty/noop implementations of the watch dog utility functions.
#[cfg(not(feature = "watchdog"))]
pub mod watchdog {