diff --git a/Cargo.lock b/Cargo.lock index f86c244203..0674d71293 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3199,9 +3199,9 @@ dependencies = [ [[package]] name = "iced-x86" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd366a53278429c028367e0ba22a46cab6d565a57afb959f06e92c7a69e7828" +checksum = "7c447cff8c7f384a7d4f741cfcff32f75f3ad02b406432e8d6c878d56b1edf6b" dependencies = [ "lazy_static", ] @@ -3547,6 +3547,9 @@ name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +dependencies = [ + "spin 0.5.2", +] [[package]] name = "libc" @@ -3609,6 +3612,15 @@ dependencies = [ "escape8259", ] +[[package]] +name = "linked_list_allocator" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286" +dependencies = [ + "spinning_top", +] + [[package]] name = "linkme" version = "0.3.31" @@ -4769,6 +4781,32 @@ dependencies = [ "thiserror 2.0.0", ] +[[package]] +name = "opentmk" +version = "0.0.0" +dependencies = [ + "anyhow", + "arrayvec", + "bitfield-struct", + "cfg-if", + "hvdef", + "iced-x86", + "lazy_static", + "linked_list_allocator", + "log", + "memory_range", + "minimal_rt", + "minimal_rt_build", + "serde", + "serde_json", + "sync_nostd", + "thiserror 2.0.0", + "uefi", + "x86_64", + "x86defs", + "zerocopy 0.8.14", +] + [[package]] name = "openvmm" version = "0.0.0" @@ -6360,6 +6398,30 @@ dependencies = [ "zerocopy 0.8.14", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spinning_top" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0" +dependencies = [ + "lock_api", +] + [[package]] name = "stackfuture" version = "0.3.0" @@ -6481,6 +6543,13 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_nostd" +version = "0.1.0" +dependencies = [ + "spin 0.10.0", +] + [[package]] name = "tap" version = "1.0.1" @@ -8736,6 +8805,12 @@ dependencies = [ "vmsocket", ] +[[package]] +name = "volatile" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "442887c63f2c839b346c192d047a7c87e73d0689c9157b00b53dcc27dd5ea793" + [[package]] name = "vpci" version = "0.0.0" @@ -9313,6 +9388,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x86_64" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f042214de98141e9c8706e8192b73f56494087cc55ebec28ce10f26c5c364ae" +dependencies = [ + "bit_field", + "bitflags 2.6.0", + "rustversion", + "volatile", +] + [[package]] name = "x86defs" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index 1dd0ca13fb..ed1823f711 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,9 @@ members = [ "vm/loader/igvmfilegen", "vm/vmgs/vmgs_lib", "vm/vmgs/vmgstool", + # opentmk + "opentmk/opentmk", + "opentmk/sync" ] exclude = [ "xsync", @@ -374,6 +377,9 @@ vnc_worker_defs = { path = "workers/vnc_worker_defs" } vnc = { path = "workers/vnc_worker/vnc" } profiler_worker = { path = "openhcl/profiler_worker" } +# opentmk +sync_nostd = { path = "opentmk/sync"} + # crates.io anyhow = "1.0" arbitrary = "1.3" @@ -445,9 +451,11 @@ jiff = "0.1" kvm-bindings = "0.7" # Use of these specific REPO will go away when changes are taken upstream. 
landlock = "0.3.1" +lazy_static = { version = "1.4.0", features = ["spin_no_std"] } libc = "0.2" libfuzzer-sys = "0.4" libtest-mimic = "0.8" +linked_list_allocator = "0.10.5" linkme = "0.3.9" log = "0.4" macaddr = "1.0" @@ -493,6 +501,7 @@ smallbox = "0.8" smallvec = "1.8" smoltcp = { version = "0.8", default-features = false } socket2 = "0.5" +spin = "0.10.0" stackfuture = "0.3" static_assertions = "1.1" syn = "2" @@ -520,6 +529,7 @@ winapi = "0.3" windows = "0.59" windows-service = "0.7" windows-sys = "0.52" +x86_64 = "0.15.2" xshell = "=0.2.2" # pin to 0.2.2 to work around https://github.com/matklad/xshell/issues/63 xshell-macros = "0.2" # We add the derive feature here since the vast majority of our crates use it. diff --git a/openhcl/minimal_rt/src/arch/x86_64/hypercall.rs b/openhcl/minimal_rt/src/arch/x86_64/hypercall.rs index 9b87231e4f..6ad9fcfcbd 100644 --- a/openhcl/minimal_rt/src/arch/x86_64/hypercall.rs +++ b/openhcl/minimal_rt/src/arch/x86_64/hypercall.rs @@ -29,6 +29,7 @@ HYPERCALL_PAGE: /// input/output pages are not being concurrently used elsewhere. For fast /// hypercalls, the caller must ensure that there are no output words so that /// there is no register corruption. +#[inline(never)] pub unsafe fn invoke_hypercall( control: hvdef::hypercall::Control, input_gpa_or_fast1: u64, diff --git a/opentmk/opentmk/Cargo.toml b/opentmk/opentmk/Cargo.toml new file mode 100644 index 0000000000..c120d08e84 --- /dev/null +++ b/opentmk/opentmk/Cargo.toml @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+
+[package]
+name = "opentmk"
+edition.workspace = true
+rust-version.workspace = true
+
+[dependencies]
+anyhow = {version = "1.0", default-features = false}
+arrayvec.workspace = true
+bitfield-struct.workspace = true
+cfg-if.workspace = true
+hvdef = {workspace = true}
+lazy_static.workspace = true
+linked_list_allocator.workspace = true
+log.workspace = true
+memory_range.workspace = true
+minimal_rt.workspace = true
+serde = { version = "1.0", default-features = false, features = ["derive"]}
+serde_json = { version = "1.0", default-features = false, features = ["alloc"] }
+thiserror.workspace = true
+uefi = { workspace = true, features = ["alloc"] }
+x86_64.workspace = true
+x86defs.workspace = true
+zerocopy.workspace = true
+sync_nostd.workspace = true
+
+
+[dependencies.iced-x86]
+version = "1.21.0"
+default-features = false
+# See below for all features
+features = ["decoder", "nasm", "no_std"]
+
+
+[lints]
+workspace = true
+
+[build-dependencies]
+minimal_rt_build.workspace = true
+
+
+[profile.release]
+opt-level = 0 # No optimizations (same as debug)
+debug = true # Include debug symbols
+debug-assertions = true # Enable debug assertions
+overflow-checks = true # Enable integer overflow checks
+lto = "off" # Disable link-time optimization
+codegen-units = 256 # Use more codegen units (faster compilation)
+
+[profile.release.package."*"]
+opt-level = 0
diff --git a/opentmk/opentmk/README.md b/opentmk/opentmk/README.md
new file mode 100644
index 0000000000..a2658e8753
--- /dev/null
+++ b/opentmk/opentmk/README.md
@@ -0,0 +1,3 @@
+# OpenTMK
+
+See the guide for more info on how to build/run the code in this crate.
diff --git a/opentmk/opentmk/build_deploy.sh b/opentmk/opentmk/build_deploy.sh
new file mode 100755
index 0000000000..8a23bf08fd
--- /dev/null
+++ b/opentmk/opentmk/build_deploy.sh
@@ -0,0 +1,4 @@
+RUST_BACKTRACE=1 CARGO_PROFILE_RELEASE_force_frame_pointers=yes cargo +nightly-2025-05-09 build -p opentmk --target x86_64-unknown-uefi --release #--target-dir ./target/x86_64-unknown-uefi/debug
+cargo xtask guest-test uefi --bootx64 ~/projects-local/openvmm/target/x86_64-unknown-uefi/release/opentmk.efi
+qemu-img convert -f raw -O vhdx ~/projects-local/openvmm/target/x86_64-unknown-uefi/release/opentmk.img ~/projects/opentmk.vhdx
+#CARGO_PROFILE_RELEASE_OPT_LEVEL=0
\ No newline at end of file
diff --git a/opentmk/opentmk/rustfmt.toml b/opentmk/opentmk/rustfmt.toml
new file mode 100644
index 0000000000..a451c50644
--- /dev/null
+++ b/opentmk/opentmk/rustfmt.toml
@@ -0,0 +1,4 @@
+imports_granularity = "Item" # Expands `use foo::{bar, baz};` into separate `use` lines
+reorder_imports = true # Optional: sort imports
+normalize_imports = true # Optional: standardize style (e.g., remove leading ::)
+group_imports = "StdExternalCrate" # Optional: group std/external/local with blank lines
diff --git a/opentmk/opentmk/src/arch/aarch64/hypercall.rs b/opentmk/opentmk/src/arch/aarch64/hypercall.rs
new file mode 100644
index 0000000000..35011e089a
--- /dev/null
+++ b/opentmk/opentmk/src/arch/aarch64/hypercall.rs
@@ -0,0 +1,27 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+/// Writes a synthetic register to tell the hypervisor the OS ID for the boot shim.
+fn report_os_id(guest_os_id: u64) {
+    // On ARM64, to be able to make hypercalls, one needs first to set the Guest OS ID
+    // synthetic register using a hypercall. Can't use `Hvcall::set_register` as that will
+    // lead to the infinite recursion as that function will first try initializing hypercalls
+    // with setting a register.
+ // + // Only one very specific HvSetVpRegisters hypercall is allowed to set the Guest OS ID + // (this is TLFS section 17.4.4.1.1 and 5.3), and that must be the fast hypercall. + let _ = minimal_rt::arch::hypercall::set_register_fast( + hvdef::HvArm64RegisterName::GuestOsId.into(), + guest_os_id.into(), + ); +} + +pub(crate) fn initialize(guest_os_id: u64) { + // We are assuming we are running under a Microsoft hypervisor. + report_os_id(guest_os_id); +} + +/// Call before jumping to kernel. +pub(crate) fn uninitialize() { + report_os_id(0); +} \ No newline at end of file diff --git a/opentmk/opentmk/src/arch/aarch64/mod.rs b/opentmk/opentmk/src/arch/aarch64/mod.rs new file mode 100644 index 0000000000..c9ab11a58c --- /dev/null +++ b/opentmk/opentmk/src/arch/aarch64/mod.rs @@ -0,0 +1,3 @@ +pub use minimal_rt::arch::aarch64::serial; + +pub mod hypercall; diff --git a/opentmk/opentmk/src/arch/mod.rs b/opentmk/opentmk/src/arch/mod.rs new file mode 100644 index 0000000000..1d578ebeb1 --- /dev/null +++ b/opentmk/opentmk/src/arch/mod.rs @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Imports and re-exports architecture-specific implementations. + +mod x86_64; + +cfg_if::cfg_if!( + if #[cfg(target_arch = "x86_64")] { + pub use x86_64::*; + } else if #[cfg(target_arch = "aarch64")] { + pub use aarch64::*; + } else { + compile_error!("target_arch is not supported"); + } +); diff --git a/opentmk/opentmk/src/arch/x86_64/hypercall.rs b/opentmk/opentmk/src/arch/x86_64/hypercall.rs new file mode 100644 index 0000000000..3fee1370cf --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/hypercall.rs @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +#![expect(unsafe_code)] + +use core::ptr::addr_of; + +use hvdef::HV_PAGE_SIZE; +use minimal_rt::arch::hypercall::HYPERCALL_PAGE; +use minimal_rt::arch::msr::read_msr; +use minimal_rt::arch::msr::write_msr; + +/// Writes an MSR to tell the hypervisor the OS ID for the boot shim. +fn report_os_id(guest_os_id: u64) { + // SAFETY: Using the contract established in the Hyper-V TLFS. + unsafe { + write_msr(hvdef::HV_X64_MSR_GUEST_OS_ID, guest_os_id); + }; +} + +/// Writes an MSR to tell the hypervisor where the hypercall page is +pub fn write_hypercall_msr(enable: bool) { + // SAFETY: Using the contract established in the Hyper-V TLFS. + let hypercall_contents = hvdef::hypercall::MsrHypercallContents::from(unsafe { + read_msr(hvdef::HV_X64_MSR_HYPERCALL) + }); + + let hypercall_page_num = addr_of!(HYPERCALL_PAGE) as u64 / HV_PAGE_SIZE; + + if !(!enable || !hypercall_contents.enable()) { + return; + } + let new_hv_contents: hvdef::hypercall::MsrHypercallContents = hypercall_contents + .with_enable(enable) + .with_gpn(if enable { hypercall_page_num } else { 0 }); + + // SAFETY: Using the contract established in the Hyper-V TLFS. + unsafe { write_msr(hvdef::HV_X64_MSR_HYPERCALL, new_hv_contents.into()) }; +} + +/// Has to be called before using hypercalls. +pub fn initialize(guest_os_id: u64) { + // We are assuming we are running under a Microsoft hypervisor, so there is + // no need to check any cpuid leaves. + report_os_id(guest_os_id); + write_hypercall_msr(true); +} + +/// Call before jumping to kernel. 
+pub fn uninitialize() { + write_hypercall_msr(false); + report_os_id(0); +} diff --git a/opentmk/opentmk/src/arch/x86_64/interrupt.rs b/opentmk/opentmk/src/arch/x86_64/interrupt.rs new file mode 100644 index 0000000000..800c13ad5d --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/interrupt.rs @@ -0,0 +1,52 @@ +use lazy_static::lazy_static; +use sync_nostd::Mutex; +use x86_64::structures::idt::InterruptDescriptorTable; +use x86_64::structures::idt::InterruptStackFrame; + +use super::interrupt_handler_register::register_interrupt_handler; +use super::interrupt_handler_register::set_common_handler; + +lazy_static! { + static ref IDT: InterruptDescriptorTable = { + let mut idt = InterruptDescriptorTable::new(); + register_interrupt_handler(&mut idt); + idt.double_fault.set_handler_fn(handler_double_fault); + idt + }; +} + +static mut HANDLERS: [fn(); 256] = [no_op; 256]; +static MUTEX: Mutex<()> = Mutex::new(()); +fn no_op() {} + +fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) { + unsafe { + HANDLERS[interrupt as usize](); + } +} + +pub fn set_handler(interrupt: u8, handler: fn()) { + let _lock = MUTEX.lock(); + unsafe { + HANDLERS[interrupt as usize] = handler; + } +} + +extern "x86-interrupt" fn handler_double_fault( + stack_frame: InterruptStackFrame, + _error_code: u64, +) -> ! 
{ + log::error!( + "EXCEPTION:\n\tERROR_CODE: {}\n\tDOUBLE FAULT\n{:#?}", + _error_code, + stack_frame + ); + loop {} +} + +// Initialize the IDT +pub fn init() { + IDT.load(); + set_common_handler(common_handler); + x86_64::instructions::interrupts::enable(); +} diff --git a/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs new file mode 100644 index 0000000000..6f37c3f6d6 --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/interrupt_handler_register.rs @@ -0,0 +1,591 @@ +#![allow(dead_code)] +use sync_nostd::Mutex; +use x86_64::structures::idt::InterruptDescriptorTable; +use x86_64::structures::idt::InterruptStackFrame; +use x86_64::structures::idt::PageFaultErrorCode; +static mut COMMON_HANDLER: fn(InterruptStackFrame, u8) = common_handler; +static COMMON_HANDLER_MUTEX: Mutex<()> = Mutex::new(()); + +#[unsafe(no_mangle)] +fn abstraction_handle(stack_frame: InterruptStackFrame, interrupt: u8) { + unsafe { (COMMON_HANDLER)(stack_frame, interrupt) }; + log::debug!("Interrupt: {}", interrupt); +} + +macro_rules! create_fn { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) { + abstraction_handle(stack_frame, $i); + } + }; +} + +macro_rules! create_fn_create_with_errorcode { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) { + abstraction_handle(stack_frame, $i); + } + }; +} + +macro_rules! create_fn_divergent_create_with_errorcode { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame, _error_code: u64) -> ! { + abstraction_handle(stack_frame, $i); + loop {} + } + }; +} + +macro_rules! create_fn_divergent_create { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name(stack_frame: InterruptStackFrame) -> ! { + abstraction_handle(stack_frame, $i); + loop {} + } + }; +} + +static mut BACKUP_RSP: u64 = 0; + +macro_rules! 
create_page_fault_fn { + ($name:ident, $i: expr) => { + extern "x86-interrupt" fn $name( + stack_frame: InterruptStackFrame, + _error_code: PageFaultErrorCode, + ) { + abstraction_handle(stack_frame, $i); + } + }; +} + +macro_rules! register_interrupt_handler { + ($idt: expr, $i: expr, $name: ident) => { + $idt[$i].set_handler_fn($name); + }; +} + +fn common_handler(_stack_frame: InterruptStackFrame, interrupt: u8) { + log::info!("Default interrupt handler fired: {}", interrupt); +} + +pub fn set_common_handler(handler: fn(InterruptStackFrame, u8)) { + let _guard = COMMON_HANDLER_MUTEX.lock(); + unsafe { + COMMON_HANDLER = handler; + } +} + +extern "x86-interrupt" fn no_op(_stack_frame: InterruptStackFrame) {} + +pub fn register_interrupt_handler(idt: &mut InterruptDescriptorTable) { + idt.divide_error.set_handler_fn(handler_0); + idt.debug.set_handler_fn(handler_1); + idt.non_maskable_interrupt.set_handler_fn(handler_2); + idt.breakpoint.set_handler_fn(handler_3); + idt.overflow.set_handler_fn(handler_4); + idt.bound_range_exceeded.set_handler_fn(handler_5); + idt.invalid_opcode.set_handler_fn(handler_6); + idt.device_not_available.set_handler_fn(handler_7); + idt.double_fault.set_handler_fn(handler_8); + register_interrupt_handler!(idt, 9, handler_9); + idt.invalid_tss.set_handler_fn(handler_10); + idt.segment_not_present.set_handler_fn(handler_11); + idt.stack_segment_fault.set_handler_fn(handler_12); + idt.general_protection_fault.set_handler_fn(handler_13); + idt.page_fault.set_handler_fn(handler_14); + // Vector 15 is reserved + idt.x87_floating_point.set_handler_fn(handler_16); + idt.alignment_check.set_handler_fn(handler_17); + idt.machine_check.set_handler_fn(handler_18); + idt.simd_floating_point.set_handler_fn(handler_19); + idt.virtualization.set_handler_fn(handler_20); + idt.cp_protection_exception.set_handler_fn(handler_21); + // Vector 22-27 is reserved + idt.hv_injection_exception.set_handler_fn(handler_28); + 
idt.vmm_communication_exception.set_handler_fn(handler_29); + idt.security_exception.set_handler_fn(handler_30); + // Vector 31 is reserved + + register_interrupt_handler!(idt, 32, handler_32); + register_interrupt_handler!(idt, 33, handler_33); + register_interrupt_handler!(idt, 34, handler_34); + register_interrupt_handler!(idt, 35, handler_35); + register_interrupt_handler!(idt, 36, handler_36); + register_interrupt_handler!(idt, 37, handler_37); + register_interrupt_handler!(idt, 38, handler_38); + register_interrupt_handler!(idt, 39, handler_39); + register_interrupt_handler!(idt, 40, handler_40); + register_interrupt_handler!(idt, 41, handler_41); + register_interrupt_handler!(idt, 42, handler_42); + register_interrupt_handler!(idt, 43, handler_43); + register_interrupt_handler!(idt, 44, handler_44); + register_interrupt_handler!(idt, 45, handler_45); + register_interrupt_handler!(idt, 46, handler_46); + register_interrupt_handler!(idt, 47, handler_47); + register_interrupt_handler!(idt, 48, handler_48); + register_interrupt_handler!(idt, 49, handler_49); + register_interrupt_handler!(idt, 50, handler_50); + register_interrupt_handler!(idt, 51, handler_51); + register_interrupt_handler!(idt, 52, handler_52); + register_interrupt_handler!(idt, 53, handler_53); + register_interrupt_handler!(idt, 54, handler_54); + register_interrupt_handler!(idt, 55, handler_55); + register_interrupt_handler!(idt, 56, handler_56); + register_interrupt_handler!(idt, 57, handler_57); + register_interrupt_handler!(idt, 58, handler_58); + register_interrupt_handler!(idt, 59, handler_59); + register_interrupt_handler!(idt, 60, handler_60); + register_interrupt_handler!(idt, 61, handler_61); + register_interrupt_handler!(idt, 62, handler_62); + register_interrupt_handler!(idt, 63, handler_63); + register_interrupt_handler!(idt, 64, handler_64); + register_interrupt_handler!(idt, 65, handler_65); + register_interrupt_handler!(idt, 66, handler_66); + register_interrupt_handler!(idt, 
67, handler_67); + register_interrupt_handler!(idt, 68, handler_68); + register_interrupt_handler!(idt, 69, handler_69); + register_interrupt_handler!(idt, 70, handler_70); + register_interrupt_handler!(idt, 71, handler_71); + register_interrupt_handler!(idt, 72, handler_72); + register_interrupt_handler!(idt, 73, handler_73); + register_interrupt_handler!(idt, 74, handler_74); + register_interrupt_handler!(idt, 75, handler_75); + register_interrupt_handler!(idt, 76, handler_76); + register_interrupt_handler!(idt, 77, handler_77); + register_interrupt_handler!(idt, 78, handler_78); + register_interrupt_handler!(idt, 79, handler_79); + register_interrupt_handler!(idt, 80, handler_80); + register_interrupt_handler!(idt, 81, handler_81); + register_interrupt_handler!(idt, 82, handler_82); + register_interrupt_handler!(idt, 83, handler_83); + register_interrupt_handler!(idt, 84, handler_84); + register_interrupt_handler!(idt, 85, handler_85); + register_interrupt_handler!(idt, 86, handler_86); + register_interrupt_handler!(idt, 87, handler_87); + register_interrupt_handler!(idt, 88, handler_88); + register_interrupt_handler!(idt, 89, handler_89); + register_interrupt_handler!(idt, 90, handler_90); + register_interrupt_handler!(idt, 91, handler_91); + register_interrupt_handler!(idt, 92, handler_92); + register_interrupt_handler!(idt, 93, handler_93); + register_interrupt_handler!(idt, 94, handler_94); + register_interrupt_handler!(idt, 95, handler_95); + register_interrupt_handler!(idt, 96, handler_96); + register_interrupt_handler!(idt, 97, handler_97); + register_interrupt_handler!(idt, 98, handler_98); + register_interrupt_handler!(idt, 99, handler_99); + register_interrupt_handler!(idt, 100, handler_100); + register_interrupt_handler!(idt, 101, handler_101); + register_interrupt_handler!(idt, 102, handler_102); + register_interrupt_handler!(idt, 103, handler_103); + register_interrupt_handler!(idt, 104, handler_104); + register_interrupt_handler!(idt, 105, 
handler_105); + register_interrupt_handler!(idt, 106, handler_106); + register_interrupt_handler!(idt, 107, handler_107); + register_interrupt_handler!(idt, 108, handler_108); + register_interrupt_handler!(idt, 109, handler_109); + register_interrupt_handler!(idt, 110, handler_110); + register_interrupt_handler!(idt, 111, handler_111); + register_interrupt_handler!(idt, 112, handler_112); + register_interrupt_handler!(idt, 113, handler_113); + register_interrupt_handler!(idt, 114, handler_114); + register_interrupt_handler!(idt, 115, handler_115); + register_interrupt_handler!(idt, 116, handler_116); + register_interrupt_handler!(idt, 117, handler_117); + register_interrupt_handler!(idt, 118, handler_118); + register_interrupt_handler!(idt, 119, handler_119); + register_interrupt_handler!(idt, 120, handler_120); + register_interrupt_handler!(idt, 121, handler_121); + register_interrupt_handler!(idt, 122, handler_122); + register_interrupt_handler!(idt, 123, handler_123); + register_interrupt_handler!(idt, 124, handler_124); + register_interrupt_handler!(idt, 125, handler_125); + register_interrupt_handler!(idt, 126, handler_126); + register_interrupt_handler!(idt, 127, handler_127); + register_interrupt_handler!(idt, 128, handler_128); + register_interrupt_handler!(idt, 129, handler_129); + register_interrupt_handler!(idt, 130, handler_130); + register_interrupt_handler!(idt, 131, handler_131); + register_interrupt_handler!(idt, 132, handler_132); + register_interrupt_handler!(idt, 133, handler_133); + register_interrupt_handler!(idt, 134, handler_134); + register_interrupt_handler!(idt, 135, handler_135); + register_interrupt_handler!(idt, 136, handler_136); + register_interrupt_handler!(idt, 137, handler_137); + register_interrupt_handler!(idt, 138, handler_138); + register_interrupt_handler!(idt, 139, handler_139); + register_interrupt_handler!(idt, 140, handler_140); + register_interrupt_handler!(idt, 141, handler_141); + register_interrupt_handler!(idt, 142, 
handler_142); + register_interrupt_handler!(idt, 143, handler_143); + register_interrupt_handler!(idt, 144, handler_144); + register_interrupt_handler!(idt, 145, handler_145); + register_interrupt_handler!(idt, 146, handler_146); + register_interrupt_handler!(idt, 147, handler_147); + register_interrupt_handler!(idt, 148, handler_148); + register_interrupt_handler!(idt, 149, handler_149); + register_interrupt_handler!(idt, 150, handler_150); + register_interrupt_handler!(idt, 151, handler_151); + register_interrupt_handler!(idt, 152, handler_152); + register_interrupt_handler!(idt, 153, handler_153); + register_interrupt_handler!(idt, 154, handler_154); + register_interrupt_handler!(idt, 155, handler_155); + register_interrupt_handler!(idt, 156, handler_156); + register_interrupt_handler!(idt, 157, handler_157); + register_interrupt_handler!(idt, 158, handler_158); + register_interrupt_handler!(idt, 159, handler_159); + register_interrupt_handler!(idt, 160, handler_160); + register_interrupt_handler!(idt, 161, handler_161); + register_interrupt_handler!(idt, 162, handler_162); + register_interrupt_handler!(idt, 163, handler_163); + register_interrupt_handler!(idt, 164, handler_164); + register_interrupt_handler!(idt, 165, handler_165); + register_interrupt_handler!(idt, 166, handler_166); + register_interrupt_handler!(idt, 167, handler_167); + register_interrupt_handler!(idt, 168, handler_168); + register_interrupt_handler!(idt, 169, handler_169); + register_interrupt_handler!(idt, 170, handler_170); + register_interrupt_handler!(idt, 171, handler_171); + register_interrupt_handler!(idt, 172, handler_172); + register_interrupt_handler!(idt, 173, handler_173); + register_interrupt_handler!(idt, 174, handler_174); + register_interrupt_handler!(idt, 175, handler_175); + register_interrupt_handler!(idt, 176, handler_176); + register_interrupt_handler!(idt, 177, handler_177); + register_interrupt_handler!(idt, 178, handler_178); + register_interrupt_handler!(idt, 179, 
handler_179); + register_interrupt_handler!(idt, 180, handler_180); + register_interrupt_handler!(idt, 181, handler_181); + register_interrupt_handler!(idt, 182, handler_182); + register_interrupt_handler!(idt, 183, handler_183); + register_interrupt_handler!(idt, 184, handler_184); + register_interrupt_handler!(idt, 185, handler_185); + register_interrupt_handler!(idt, 186, handler_186); + register_interrupt_handler!(idt, 187, handler_187); + register_interrupt_handler!(idt, 188, handler_188); + register_interrupt_handler!(idt, 189, handler_189); + register_interrupt_handler!(idt, 190, handler_190); + register_interrupt_handler!(idt, 191, handler_191); + register_interrupt_handler!(idt, 192, handler_192); + register_interrupt_handler!(idt, 193, handler_193); + register_interrupt_handler!(idt, 194, handler_194); + register_interrupt_handler!(idt, 195, handler_195); + register_interrupt_handler!(idt, 196, handler_196); + register_interrupt_handler!(idt, 197, handler_197); + register_interrupt_handler!(idt, 198, handler_198); + register_interrupt_handler!(idt, 199, handler_199); + register_interrupt_handler!(idt, 200, handler_200); + register_interrupt_handler!(idt, 201, handler_201); + register_interrupt_handler!(idt, 202, handler_202); + register_interrupt_handler!(idt, 203, handler_203); + register_interrupt_handler!(idt, 204, handler_204); + register_interrupt_handler!(idt, 205, handler_205); + register_interrupt_handler!(idt, 206, handler_206); + register_interrupt_handler!(idt, 207, handler_207); + register_interrupt_handler!(idt, 208, handler_208); + register_interrupt_handler!(idt, 209, handler_209); + register_interrupt_handler!(idt, 210, handler_210); + register_interrupt_handler!(idt, 211, handler_211); + register_interrupt_handler!(idt, 212, handler_212); + register_interrupt_handler!(idt, 213, handler_213); + register_interrupt_handler!(idt, 214, handler_214); + register_interrupt_handler!(idt, 215, handler_215); + register_interrupt_handler!(idt, 216, 
handler_216); + register_interrupt_handler!(idt, 217, handler_217); + register_interrupt_handler!(idt, 218, handler_218); + register_interrupt_handler!(idt, 219, handler_219); + register_interrupt_handler!(idt, 220, handler_220); + register_interrupt_handler!(idt, 221, handler_221); + register_interrupt_handler!(idt, 222, handler_222); + register_interrupt_handler!(idt, 223, handler_223); + register_interrupt_handler!(idt, 224, handler_224); + register_interrupt_handler!(idt, 225, handler_225); + register_interrupt_handler!(idt, 226, handler_226); + register_interrupt_handler!(idt, 227, handler_227); + register_interrupt_handler!(idt, 228, handler_228); + register_interrupt_handler!(idt, 229, handler_229); + register_interrupt_handler!(idt, 230, handler_230); + register_interrupt_handler!(idt, 231, handler_231); + register_interrupt_handler!(idt, 232, handler_232); + register_interrupt_handler!(idt, 233, handler_233); + register_interrupt_handler!(idt, 234, handler_234); + register_interrupt_handler!(idt, 235, handler_235); + register_interrupt_handler!(idt, 236, handler_236); + register_interrupt_handler!(idt, 237, handler_237); + register_interrupt_handler!(idt, 238, handler_238); + register_interrupt_handler!(idt, 239, handler_239); + register_interrupt_handler!(idt, 240, handler_240); + register_interrupt_handler!(idt, 241, handler_241); + register_interrupt_handler!(idt, 242, handler_242); + register_interrupt_handler!(idt, 243, handler_243); + register_interrupt_handler!(idt, 244, handler_244); + register_interrupt_handler!(idt, 245, handler_245); + register_interrupt_handler!(idt, 246, handler_246); + register_interrupt_handler!(idt, 247, handler_247); + register_interrupt_handler!(idt, 248, handler_248); + register_interrupt_handler!(idt, 249, handler_249); + register_interrupt_handler!(idt, 250, handler_250); + register_interrupt_handler!(idt, 251, handler_251); + register_interrupt_handler!(idt, 252, handler_252); + register_interrupt_handler!(idt, 253, 
handler_253); + register_interrupt_handler!(idt, 254, handler_254); + register_interrupt_handler!(idt, 255, handler_255); +} + +create_fn!(handler_0, 0); +create_fn!(handler_1, 1); +create_fn!(handler_2, 2); +create_fn!(handler_3, 3); +create_fn!(handler_4, 4); +create_fn!(handler_5, 5); +create_fn!(handler_6, 6); +create_fn!(handler_7, 7); +create_fn_divergent_create_with_errorcode!(handler_8, 8); +create_fn!(handler_9, 9); +create_fn_create_with_errorcode!(handler_10, 10); +create_fn_create_with_errorcode!(handler_11, 11); +create_fn_create_with_errorcode!(handler_12, 12); +create_fn_create_with_errorcode!(handler_13, 13); +create_page_fault_fn!(handler_14, 14); +create_fn!(handler_15, 15); +create_fn!(handler_16, 16); +create_fn_create_with_errorcode!(handler_17, 17); +create_fn_divergent_create!(handler_18, 18); +create_fn!(handler_19, 19); +create_fn!(handler_20, 20); +create_fn_create_with_errorcode!(handler_21, 21); +create_fn!(handler_22, 22); +create_fn!(handler_23, 23); +create_fn!(handler_24, 24); +create_fn!(handler_25, 25); +create_fn!(handler_26, 26); +create_fn!(handler_27, 27); +create_fn!(handler_28, 28); +create_fn_create_with_errorcode!(handler_29, 29); +create_fn_create_with_errorcode!(handler_30, 30); +create_fn!(handler_31, 31); +create_fn!(handler_32, 32); +create_fn!(handler_33, 33); +create_fn!(handler_34, 34); +create_fn!(handler_35, 35); +create_fn!(handler_36, 36); +create_fn!(handler_37, 37); +create_fn!(handler_38, 38); +create_fn!(handler_39, 39); +create_fn!(handler_40, 40); +create_fn!(handler_41, 41); +create_fn!(handler_42, 42); +create_fn!(handler_43, 43); +create_fn!(handler_44, 44); +create_fn!(handler_45, 45); +create_fn!(handler_46, 46); +create_fn!(handler_47, 47); +create_fn!(handler_48, 48); +create_fn!(handler_49, 49); +create_fn!(handler_50, 50); +create_fn!(handler_51, 51); +create_fn!(handler_52, 52); +create_fn!(handler_53, 53); +create_fn!(handler_54, 54); +create_fn!(handler_55, 55); +create_fn!(handler_56, 56); 
+create_fn!(handler_57, 57); +create_fn!(handler_58, 58); +create_fn!(handler_59, 59); +create_fn!(handler_60, 60); +create_fn!(handler_61, 61); +create_fn!(handler_62, 62); +create_fn!(handler_63, 63); +create_fn!(handler_64, 64); +create_fn!(handler_65, 65); +create_fn!(handler_66, 66); +create_fn!(handler_67, 67); +create_fn!(handler_68, 68); +create_fn!(handler_69, 69); +create_fn!(handler_70, 70); +create_fn!(handler_71, 71); +create_fn!(handler_72, 72); +create_fn!(handler_73, 73); +create_fn!(handler_74, 74); +create_fn!(handler_75, 75); +create_fn!(handler_76, 76); +create_fn!(handler_77, 77); +create_fn!(handler_78, 78); +create_fn!(handler_79, 79); +create_fn!(handler_80, 80); +create_fn!(handler_81, 81); +create_fn!(handler_82, 82); +create_fn!(handler_83, 83); +create_fn!(handler_84, 84); +create_fn!(handler_85, 85); +create_fn!(handler_86, 86); +create_fn!(handler_87, 87); +create_fn!(handler_88, 88); +create_fn!(handler_89, 89); +create_fn!(handler_90, 90); +create_fn!(handler_91, 91); +create_fn!(handler_92, 92); +create_fn!(handler_93, 93); +create_fn!(handler_94, 94); +create_fn!(handler_95, 95); +create_fn!(handler_96, 96); +create_fn!(handler_97, 97); +create_fn!(handler_98, 98); +create_fn!(handler_99, 99); +create_fn!(handler_100, 100); +create_fn!(handler_101, 101); +create_fn!(handler_102, 102); +create_fn!(handler_103, 103); +create_fn!(handler_104, 104); +create_fn!(handler_105, 105); +create_fn!(handler_106, 106); +create_fn!(handler_107, 107); +create_fn!(handler_108, 108); +create_fn!(handler_109, 109); +create_fn!(handler_110, 110); +create_fn!(handler_111, 111); +create_fn!(handler_112, 112); +create_fn!(handler_113, 113); +create_fn!(handler_114, 114); +create_fn!(handler_115, 115); +create_fn!(handler_116, 116); +create_fn!(handler_117, 117); +create_fn!(handler_118, 118); +create_fn!(handler_119, 119); +create_fn!(handler_120, 120); +create_fn!(handler_121, 121); +create_fn!(handler_122, 122); +create_fn!(handler_123, 123); 
+create_fn!(handler_124, 124); +create_fn!(handler_125, 125); +create_fn!(handler_126, 126); +create_fn!(handler_127, 127); +create_fn!(handler_128, 128); +create_fn!(handler_129, 129); +create_fn!(handler_130, 130); +create_fn!(handler_131, 131); +create_fn!(handler_132, 132); +create_fn!(handler_133, 133); +create_fn!(handler_134, 134); +create_fn!(handler_135, 135); +create_fn!(handler_136, 136); +create_fn!(handler_137, 137); +create_fn!(handler_138, 138); +create_fn!(handler_139, 139); +create_fn!(handler_140, 140); +create_fn!(handler_141, 141); +create_fn!(handler_142, 142); +create_fn!(handler_143, 143); +create_fn!(handler_144, 144); +create_fn!(handler_145, 145); +create_fn!(handler_146, 146); +create_fn!(handler_147, 147); +create_fn!(handler_148, 148); +create_fn!(handler_149, 149); +create_fn!(handler_150, 150); +create_fn!(handler_151, 151); +create_fn!(handler_152, 152); +create_fn!(handler_153, 153); +create_fn!(handler_154, 154); +create_fn!(handler_155, 155); +create_fn!(handler_156, 156); +create_fn!(handler_157, 157); +create_fn!(handler_158, 158); +create_fn!(handler_159, 159); +create_fn!(handler_160, 160); +create_fn!(handler_161, 161); +create_fn!(handler_162, 162); +create_fn!(handler_163, 163); +create_fn!(handler_164, 164); +create_fn!(handler_165, 165); +create_fn!(handler_166, 166); +create_fn!(handler_167, 167); +create_fn!(handler_168, 168); +create_fn!(handler_169, 169); +create_fn!(handler_170, 170); +create_fn!(handler_171, 171); +create_fn!(handler_172, 172); +create_fn!(handler_173, 173); +create_fn!(handler_174, 174); +create_fn!(handler_175, 175); +create_fn!(handler_176, 176); +create_fn!(handler_177, 177); +create_fn!(handler_178, 178); +create_fn!(handler_179, 179); +create_fn!(handler_180, 180); +create_fn!(handler_181, 181); +create_fn!(handler_182, 182); +create_fn!(handler_183, 183); +create_fn!(handler_184, 184); +create_fn!(handler_185, 185); +create_fn!(handler_186, 186); +create_fn!(handler_187, 187); 
+create_fn!(handler_188, 188); +create_fn!(handler_189, 189); +create_fn!(handler_190, 190); +create_fn!(handler_191, 191); +create_fn!(handler_192, 192); +create_fn!(handler_193, 193); +create_fn!(handler_194, 194); +create_fn!(handler_195, 195); +create_fn!(handler_196, 196); +create_fn!(handler_197, 197); +create_fn!(handler_198, 198); +create_fn!(handler_199, 199); +create_fn!(handler_200, 200); +create_fn!(handler_201, 201); +create_fn!(handler_202, 202); +create_fn!(handler_203, 203); +create_fn!(handler_204, 204); +create_fn!(handler_205, 205); +create_fn!(handler_206, 206); +create_fn!(handler_207, 207); +create_fn!(handler_208, 208); +create_fn!(handler_209, 209); +create_fn!(handler_210, 210); +create_fn!(handler_211, 211); +create_fn!(handler_212, 212); +create_fn!(handler_213, 213); +create_fn!(handler_214, 214); +create_fn!(handler_215, 215); +create_fn!(handler_216, 216); +create_fn!(handler_217, 217); +create_fn!(handler_218, 218); +create_fn!(handler_219, 219); +create_fn!(handler_220, 220); +create_fn!(handler_221, 221); +create_fn!(handler_222, 222); +create_fn!(handler_223, 223); +create_fn!(handler_224, 224); +create_fn!(handler_225, 225); +create_fn!(handler_226, 226); +create_fn!(handler_227, 227); +create_fn!(handler_228, 228); +create_fn!(handler_229, 229); +create_fn!(handler_230, 230); +create_fn!(handler_231, 231); +create_fn!(handler_232, 232); +create_fn!(handler_233, 233); +create_fn!(handler_234, 234); +create_fn!(handler_235, 235); +create_fn!(handler_236, 236); +create_fn!(handler_237, 237); +create_fn!(handler_238, 238); +create_fn!(handler_239, 239); +create_fn!(handler_240, 240); +create_fn!(handler_241, 241); +create_fn!(handler_242, 242); +create_fn!(handler_243, 243); +create_fn!(handler_244, 244); +create_fn!(handler_245, 245); +create_fn!(handler_246, 246); +create_fn!(handler_247, 247); +create_fn!(handler_248, 248); +create_fn!(handler_249, 249); +create_fn!(handler_250, 250); +create_fn!(handler_251, 251); 
+create_fn!(handler_252, 252); +create_fn!(handler_253, 253); +create_fn!(handler_254, 254); +create_fn!(handler_255, 255); diff --git a/opentmk/opentmk/src/arch/x86_64/io.rs b/opentmk/opentmk/src/arch/x86_64/io.rs new file mode 100644 index 0000000000..56d60b616f --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/io.rs @@ -0,0 +1,54 @@ +use core::arch::asm; + +/// Write a byte to a port. +pub fn outb(port: u16, data: u8) { + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "out dx, al", + in("dx") port, + in("al") data, + } + } +} + +/// Read a byte from a port. +pub fn inb(port: u16) -> u8 { + let mut data; + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "in al, dx", + in("dx") port, + out("al") data, + } + } + data +} + +/// Read a double word from a port. +pub fn inl(port: u16) -> u32 { + let mut data; + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! { + "in eax, dx", + in("dx") port, + out("eax") data, + } + } + data +} + +/// Write a double word to a port. +/// This is a no-op on x86. +pub fn outl(port: u16, data: u32) { + // SAFETY: The caller has assured us this is safe. + unsafe { + asm! 
{ + "out dx, eax", + in("dx") port, + in("eax") data, + } + } +} diff --git a/opentmk/opentmk/src/arch/x86_64/mod.rs b/opentmk/opentmk/src/arch/x86_64/mod.rs new file mode 100644 index 0000000000..2da8e40fb0 --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/mod.rs @@ -0,0 +1,7 @@ +pub mod hypercall; +pub mod interrupt; +mod interrupt_handler_register; +mod io; +pub mod rtc; +pub mod serial; +pub mod tpm; diff --git a/opentmk/opentmk/src/arch/x86_64/rtc.rs b/opentmk/opentmk/src/arch/x86_64/rtc.rs new file mode 100644 index 0000000000..afdcaaa9ee --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/rtc.rs @@ -0,0 +1,132 @@ +use super::io::inb; +use super::io::outb; +// CMOS/RTC I/O ports +const CMOS_ADDRESS: u16 = 0x70; +const CMOS_DATA: u16 = 0x71; + +// RTC register addresses +const RTC_SECONDS: u8 = 0x00; +const RTC_MINUTES: u8 = 0x02; +const RTC_HOURS: u8 = 0x04; +const RTC_DAY: u8 = 0x07; +const RTC_MONTH: u8 = 0x08; +const RTC_YEAR: u8 = 0x09; +const RTC_STATUS_A: u8 = 0x0A; +const RTC_STATUS_B: u8 = 0x0B; + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct DateTime { + seconds: u8, + minutes: u8, + hours: u8, + day: u8, + month: u8, + year: u8, +} + +// implement display as ISO 8601 format +impl core::fmt::Display for DateTime { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "{:02}:{:02}:{:02} {:02}-{:02}-{:04} UTC", + self.hours, + self.minutes, + self.seconds, + self.day, + self.month, + 2000 + self.year as u64 + ) + } +} + +// convert datetime to Unix epoch +impl DateTime { + pub fn to_unix_epoch_sec(&self) -> u64 { + let mut days = self.day as u64; + days += (self.month as u64 - 1) * 30; // Approximation, not accurate for all months + days += (self.year as u64 + 2000 - 1970) * 365; // Approximation, not accounting for leap years + let hours = self.hours as u64; + let minutes = self.minutes as u64; + let seconds = self.seconds as u64; + + (days * 24 + hours) * 3600 + (minutes * 60) + seconds + } +} + +// Read 
from CMOS/RTC register +fn read_cmos(reg: u8) -> u8 { + outb(CMOS_ADDRESS, reg); + inb(CMOS_DATA) +} + +// Check if RTC update is in progress +fn rtc_update_in_progress() -> bool { + read_cmos(RTC_STATUS_A) & 0x80 != 0 +} + +// Convert BCD to binary if needed +fn bcd_to_binary(bcd: u8) -> u8 { + (bcd & 0x0F) + ((bcd >> 4) * 10) +} + +// Read current date and time from RTC +pub fn read_rtc() -> DateTime { + // Wait for any update to complete + while rtc_update_in_progress() {} + + let mut datetime = DateTime { + seconds: read_cmos(RTC_SECONDS), + minutes: read_cmos(RTC_MINUTES), + hours: read_cmos(RTC_HOURS), + day: read_cmos(RTC_DAY), + month: read_cmos(RTC_MONTH), + year: read_cmos(RTC_YEAR), + }; + + // Check if we need to wait for another update cycle + while rtc_update_in_progress() {} + + // Read again to ensure consistency + let seconds_check = read_cmos(RTC_SECONDS); + if seconds_check != datetime.seconds { + datetime.seconds = seconds_check; + datetime.minutes = read_cmos(RTC_MINUTES); + datetime.hours = read_cmos(RTC_HOURS); + datetime.day = read_cmos(RTC_DAY); + datetime.month = read_cmos(RTC_MONTH); + datetime.year = read_cmos(RTC_YEAR); + } + + // Check RTC format (BCD vs binary) + let status_b = read_cmos(RTC_STATUS_B); + let is_bcd = (status_b & 0x04) == 0; + + if is_bcd { + datetime.seconds = bcd_to_binary(datetime.seconds); + datetime.minutes = bcd_to_binary(datetime.minutes); + datetime.hours = bcd_to_binary(datetime.hours); + datetime.day = bcd_to_binary(datetime.day); + datetime.month = bcd_to_binary(datetime.month); + datetime.year = bcd_to_binary(datetime.year); + } + + // Handle 12-hour format if needed + if (status_b & 0x02) == 0 && (datetime.hours & 0x80) != 0 { + datetime.hours = ((datetime.hours & 0x7F) + 12) % 24; + } + + datetime +} + +pub fn delay_sec(seconds: u64) { + let start = read_rtc().to_unix_epoch_sec(); + let end = start + seconds; + loop { + let current = read_rtc().to_unix_epoch_sec(); + if current >= end { + break; + } + } 
+} diff --git a/opentmk/opentmk/src/arch/x86_64/serial.rs b/opentmk/opentmk/src/arch/x86_64/serial.rs new file mode 100644 index 0000000000..2124c7b673 --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/serial.rs @@ -0,0 +1,110 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Serial output for debugging. + +use core::fmt; + +use sync_nostd::Mutex; + +use super::io; + +/// Serial port addresses. +/// These are the standard COM ports used in x86 systems. +#[repr(u16)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum SerialPort { + COM1, + COM2, + COM3, + COM4, +} + +impl SerialPort { + /// Convert the SerialPort enum to its u16 representation. + pub fn value(self) -> u16 { + match self { + SerialPort::COM1 => 0x3F8, + SerialPort::COM2 => 0x2F8, + SerialPort::COM3 => 0x3E8, + SerialPort::COM4 => 0x2E8, + } + } +} + +/// A trait to access io ports used by the serial device. +pub trait IoAccess { + /// Issue an in byte instruction. + /// + /// # Safety + /// + /// The caller must be sure that the given port is safe to read from. + unsafe fn inb(&self, port: u16) -> u8; + /// Issue an out byte instruction. + /// + /// # Safety + /// + /// The caller must be sure that the given port is safe to write to, and that the + /// given value is safe for it. + unsafe fn outb(&self, port: u16, data: u8); +} + +/// A struct to access io ports using in/out instructions. +pub struct InstrIoAccess; + +impl IoAccess for InstrIoAccess { + unsafe fn inb(&self, port: u16) -> u8 { + io::inb(port) + } + + unsafe fn outb(&self, port: u16, data: u8) { + io::outb(port, data) + } +} + +/// A writer for the UART COM Ports. +pub struct Serial { + io: T, + serial_port: SerialPort, + mutex: Mutex<()>, +} + +impl Serial { + /// Initialize the serial port. 
+ pub const fn new(serial_port: SerialPort, io: T) -> Self { + Self { + io, + serial_port, + mutex: Mutex::new(()), + } + } + + pub fn init(&self) { + unsafe { + self.io.outb(self.serial_port.value() + 1, 0x00); // Disable all interrupts + self.io.outb(self.serial_port.value() + 2, 0xC7); // Enable FIFO, clear them, with 14-byte threshold + self.io.outb(self.serial_port.value() + 4, 0x0F); + } + } + + fn write_byte(&self, b: u8) { + // SAFETY: Reading and writing text to the serial device is safe. + unsafe { + while self.io.inb(self.serial_port.value() + 5) & 0x20 == 0 {} + self.io.outb(self.serial_port.value(), b); + } + } +} + +impl fmt::Write for Serial { + fn write_str(&mut self, s: &str) -> fmt::Result { + let _guard = self.mutex.lock(); + for &b in s.as_bytes() { + if b == b'\n' { + self.write_byte(b'\r'); + } + self.write_byte(b); + } + Ok(()) + } +} diff --git a/opentmk/opentmk/src/arch/x86_64/tpm.rs b/opentmk/opentmk/src/arch/x86_64/tpm.rs new file mode 100644 index 0000000000..f837d62653 --- /dev/null +++ b/opentmk/opentmk/src/arch/x86_64/tpm.rs @@ -0,0 +1,120 @@ +use zerocopy::IntoBytes; + +use crate::devices::tpm::protocol::protocol::SelfTestCmd; +use crate::devices::tpm::protocol::protocol::TpmCommand; +use crate::devices::tpm::protocol::SessionTagEnum; +use crate::devices::tpm::protocol::TpmCommandError; + +pub const TPM_DEVICE_MMIO_REGION_BASE_ADDRESS: u64 = 0xfed40000; +pub const TPM_DEVICE_MMIO_REGION_SIZE: u64 = 0x70; + +pub const TPM_DEVICE_IO_PORT_RANGE_BEGIN: u16 = 0x1040; +pub const TPM_DEVICE_IO_PORT_RANGE_END: u16 = 0x1048; + +pub const TPM_DEVICE_IO_PORT_CONTROL_OFFSET: u16 = 0; +pub const TPM_DEVICE_IO_PORT_DATA_OFFSET: u16 = 4; + +pub const TPM_DEVICE_MMIO_PORT_REGION_BASE_ADDRESS: u64 = + TPM_DEVICE_MMIO_REGION_BASE_ADDRESS + 0x80; +pub const TPM_DEVICE_MMIO_PORT_CONTROL: u64 = + TPM_DEVICE_MMIO_PORT_REGION_BASE_ADDRESS + TPM_DEVICE_IO_PORT_CONTROL_OFFSET as u64; +pub const TPM_DEVICE_MMIO_PORT_DATA: u64 = + 
TPM_DEVICE_MMIO_PORT_REGION_BASE_ADDRESS + TPM_DEVICE_IO_PORT_DATA_OFFSET as u64; +pub const TPM_DEVICE_MMIO_PORT_REGION_SIZE: u64 = 0x8; + +pub struct Tpm<'a> { + command_buffer: Option<&'a mut [u8]>, + response_buffer: Option<&'a mut [u8]>, +} + +impl<'a> Tpm<'a> { + pub fn new() -> Tpm<'a> { + Tpm { + command_buffer: None, + response_buffer: None, + } + } + + pub fn set_command_buffer(&mut self, buffer: &'a mut [u8]) { + self.command_buffer = Some(buffer); + } + + pub fn set_response_buffer(&mut self, buffer: &'a mut [u8]) { + self.response_buffer = Some(buffer); + } + + #[cfg(target_arch = "x86_64")] + pub fn get_control_port(command: u32) -> u32 { + let control_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_CONTROL_OFFSET; + let data_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_DATA_OFFSET; + super::io::outl(control_port, command); + super::io::inl(data_port) + } + + pub fn get_tcg_protocol_version() -> u32 { + Tpm::get_control_port(64) + } + + pub fn map_shared_memory(gpa: u32) -> u32 { + let control_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_CONTROL_OFFSET; + let data_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_DATA_OFFSET; + super::io::outl(control_port, 0x1); + super::io::outl(data_port, gpa); + super::io::outl(control_port, 0x2); + super::io::inl(data_port) + } + + pub fn get_mapped_shared_memory() -> u32 { + let data_port = TPM_DEVICE_IO_PORT_RANGE_BEGIN + TPM_DEVICE_IO_PORT_DATA_OFFSET; + Tpm::get_control_port(0x2); + super::io::inl(data_port) + } + + pub fn copy_to_command_buffer(&mut self, buffer: &[u8]) { + self.command_buffer.as_mut().unwrap()[..buffer.len()].copy_from_slice(buffer); + } + + pub fn copy_from_response_buffer(&mut self, buffer: &mut [u8]) { + buffer.copy_from_slice(self.response_buffer.as_ref().unwrap()); + } + + pub fn execute_command() { + let command_exec_mmio_addr = TPM_DEVICE_MMIO_REGION_BASE_ADDRESS + 0x4c; + let command_exec_mmio_ptr = command_exec_mmio_addr as *mut u32; 
+ + unsafe { + *command_exec_mmio_ptr = 0x1; + } + + while unsafe { *command_exec_mmio_ptr } == 0x1 { + unsafe { + core::arch::x86_64::_mm_pause(); + } + } + } + + pub fn run_command(&mut self, buffer: &[u8]) -> [u8; 4096] { + assert!(buffer.len() <= 4096); + self.copy_to_command_buffer(buffer); + + Tpm::execute_command(); + + let mut response = [0; 4096]; + self.copy_from_response_buffer(&mut response); + response + } + + pub fn self_test(&mut self) -> Result<(), TpmCommandError> { + let session_tag = SessionTagEnum::NoSessions; + let cmd = SelfTestCmd::new(session_tag.into(), true); + let response = self.run_command(cmd.as_bytes()); + + match SelfTestCmd::base_validate_reply(&response, session_tag) { + Err(error) => Err(TpmCommandError::InvalidResponse(error)), + Ok((res, false)) => Err(TpmCommandError::TpmCommandFailed { + response_code: res.header.response_code.get(), + })?, + Ok((_res, true)) => Ok(()), + } + } +} diff --git a/opentmk/opentmk/src/context.rs b/opentmk/opentmk/src/context.rs new file mode 100644 index 0000000000..8e8f122938 --- /dev/null +++ b/opentmk/opentmk/src/context.rs @@ -0,0 +1,135 @@ +#![allow(dead_code)] +use alloc::boxed::Box; +use core::ops::Range; + +use hvdef::Vtl; + +use crate::tmkdefs::TmkResult; + +pub trait SecureInterceptPlatformTrait { + /// Installs a secure-world intercept for the given interrupt. + /// + /// The platform must arrange that the supplied `interrupt_idx` + /// triggers a VM-exit or any other mechanism that transfers control + /// to the TMK secure handler. + /// + /// Returns `Ok(())` on success or an error wrapped in `TmkResult`. + fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()>; +} + +pub trait InterruptPlatformTrait { + /// Associates an interrupt vector with a handler inside the + /// non-secure world. + /// + /// * `interrupt_idx` – IDT/GIC index to program + /// * `handler` – Function that will be executed when the interrupt + /// fires. 
+ fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()>; + + /// Finalises platform specific interrupt setup (enables the table, + /// unmasks lines, etc.). + fn setup_interrupt_handler(&mut self) -> TmkResult<()>; +} + +pub trait MsrPlatformTrait { + /// Reads the content of `msr`. + /// + /// Returns the 64-bit value currently stored in that MSR. + fn read_msr(&mut self, msr: u32) -> TmkResult; + + /// Writes `value` into `msr`. + fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()>; +} + +pub trait VirtualProcessorPlatformTrait +where + T: VtlPlatformTrait, +{ + /// Returns the index of the virtual CPU currently executing this + /// code. + fn get_current_vp(&self) -> TmkResult; + + /// Reads the architecture specific register identified by `reg`. + fn get_register(&mut self, reg: u32) -> TmkResult; + + /// Total number of online VPs in the partition. + fn get_vp_count(&self) -> TmkResult; + + /// Queues `cmd` to run later on the VP described inside the + /// `VpExecutor`. + fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + + /// Synchronously executes `cmd` on its target VP. + fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()>; + + /// Starts the target VP (if required) and executes `cmd` with a + /// platform provided default VTL context. + fn start_running_vp_with_default_context(&mut self, cmd: VpExecutor) -> TmkResult<()>; +} + +pub trait VtlPlatformTrait { + /// Applies VTL protection to the supplied physical address range. + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()>; + + /// Enables the given `vtl` on `vp_index` with a default context. + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + + /// Returns the VTL level the caller is currently executing in. + fn get_current_vtl(&self) -> TmkResult; + + /// Sets the default VTL context on `vp_index`. 
+ fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()>; + + /// Performs partition wide initialisation for a given `vtl`. + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()>; + + /// Platform specific global VTL preparation (stage 2 translation, + /// EPT, etc.). + fn setup_vtl_protection(&mut self) -> TmkResult<()>; + + /// Switches the current hardware thread to the higher privileged VTL. + fn switch_to_high_vtl(&mut self); + + /// Switches the current hardware thread back to the lower privileged VTL. + fn switch_to_low_vtl(&mut self); + + fn set_vp_state_with_vtl(&mut self, register_index: u32, value: u64, vtl: Vtl) + -> TmkResult<()>; + + fn get_vp_state_with_vtl(&mut self, register_index: u32, vtl: Vtl) -> TmkResult; +} + +pub trait X64PlatformTrait {} +pub trait Aarch64PlatformTrait {} + +pub struct VpExecutor { + vp_index: u32, + vtl: Vtl, + cmd: Option>, +} + +impl VpExecutor { + /// Creates a new executor targeting `vp_index` running in `vtl`. + pub fn new(vp_index: u32, vtl: Vtl) -> Self { + VpExecutor { + vp_index, + vtl, + cmd: None, + } + } + + /// Stores a closure `cmd` that will be executed on the target VP. + /// + /// The closure receives a mutable reference to the platform-specific + /// type `T` that implements `VtlPlatformTrait`. + pub fn command(mut self, cmd: impl FnOnce(&mut T) + 'static + Send) -> Self { + self.cmd = Some(Box::new(cmd)); + self + } + + /// Extracts the tuple `(vp_index, vtl, cmd)` consuming `self`. 
+ pub fn get(mut self) -> (u32, Vtl, Option>) { + let cmd = self.cmd.take(); + (self.vp_index, self.vtl, cmd) + } +} diff --git a/opentmk/opentmk/src/devices/mod.rs b/opentmk/opentmk/src/devices/mod.rs new file mode 100644 index 0000000000..1e7e6110dc --- /dev/null +++ b/opentmk/opentmk/src/devices/mod.rs @@ -0,0 +1 @@ +pub mod tpm; diff --git a/opentmk/opentmk/src/devices/tpm/mod.rs b/opentmk/opentmk/src/devices/tpm/mod.rs new file mode 100644 index 0000000000..19c290fe42 --- /dev/null +++ b/opentmk/opentmk/src/devices/tpm/mod.rs @@ -0,0 +1,14 @@ +use zerocopy::IntoBytes; + +pub mod protocol; + +pub struct TpmUtil; +impl TpmUtil { + pub fn get_self_test_cmd() -> [u8; 4096] { + let session_tag = protocol::SessionTagEnum::NoSessions; + let cmd = protocol::protocol::SelfTestCmd::new(session_tag.into(), true); + let mut buffer = [0; 4096]; + buffer[..cmd.as_bytes().len()].copy_from_slice(cmd.as_bytes()); + buffer + } +} diff --git a/opentmk/opentmk/src/devices/tpm/protocol.rs b/opentmk/opentmk/src/devices/tpm/protocol.rs new file mode 100644 index 0000000000..90e72adb54 --- /dev/null +++ b/opentmk/opentmk/src/devices/tpm/protocol.rs @@ -0,0 +1,4061 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! TPM 2.0 Protocol types, as defined in the spec + +//! NOTE: once the `tpm-rs` project matures, this hand-rolled code should be *deleted* and +//! replaced with types from that `tpm-rs` project. 
+ +use alloc::vec::Vec; + +use bitfield_struct::bitfield; +use thiserror::Error; +use zerocopy::FromBytes; +use zerocopy::FromZeros; +use zerocopy::Immutable; +use zerocopy::IntoBytes; +use zerocopy::KnownLayout; + +use self::packed_nums::*; + +#[allow(non_camel_case_types)] +mod packed_nums { + pub type u16_be = zerocopy::U16; + pub type u32_be = zerocopy::U32; + pub type u64_be = zerocopy::U64; +} + +#[derive(Debug, Error)] +pub enum InvalidInput { + #[error("input data size too large for buffer - input size > upper bound: {0} > {1}")] + BufferSizeTooLarge(usize, usize), + #[error("input list length too long - input length > upper bound: {0} > {1}")] + PcrSelectionsLengthTooLong(usize, usize), + #[error("input payload size too large - input size > upper bound: {0} > {1}")] + NvPublicPayloadTooLarge(usize, usize), +} + +#[derive(Debug, Error)] +pub enum TpmProtoError { + #[error("input user_auth to TpmsSensitiveCreate is invalid")] + TpmsSensitiveCreateUserAuth(#[source] InvalidInput), + #[error("input data to TpmsSensitiveCreate is invalid")] + TpmsSensitiveCreateData(#[source] InvalidInput), + #[error("input auth_policy to TpmtPublic is invalid")] + TpmtPublicAuthPolicy(#[source] InvalidInput), + #[error("input unique to TpmtPublic is invalid")] + TpmtPublicUnique(#[source] InvalidInput), + #[error("input auth_policy to TpmsNvPublic is invalid")] + TpmsNvPublicAuthPolicy(#[source] InvalidInput), + #[error("input outside_info to CreatePrimary is invalid")] + CreatePrimaryOutsideInfo(#[source] InvalidInput), + #[error("input creation_pcr to CreatePrimary is invalid")] + CreatePrimaryCreationPcr(#[source] InvalidInput), + #[error("input auth to NvDefineSpace is invalid")] + NvDefineSpaceAuth(#[source] InvalidInput), + #[error("input public_info to NvDefineSpace is invalid")] + NvDefineSpacePublicInfo(#[source] InvalidInput), + #[error("input data to NvWrite is invalid")] + NvWriteData(#[source] InvalidInput), + #[error("input pcr_allocation to PcrAllocate is 
invalid")] + PcrAllocatePcrAllocation(#[source] InvalidInput), + #[error("input data to Import is invalid")] + ImportData(#[source] InvalidInput), +} + +#[derive(Debug, Error)] +pub enum ResponseValidationError { + #[error("response size is too small to fit into the buffer")] + ResponseSizeTooSmall, + #[error( + "size {size} specified in the response header does not meet the minimal size of command type {expected_size}, command succeeded: {command_succeeded}" + )] + HeaderResponseSizeMismatch { + size: u32, + expected_size: usize, + command_succeeded: bool, + }, + #[error( + "unexpected session tag {response_session_tag} specified in the response header, expected: {expected_session_tag}, command succeeded: {command_succeeded}" + )] + HeaderSessionTagMismatch { + response_session_tag: u16, + expected_session_tag: u16, + command_succeeded: bool, + }, +} + +#[repr(transparent)] +#[derive(Copy, Clone, Debug, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct ReservedHandle(pub u32_be); + +impl PartialEq for u32 { + fn eq(&self, other: &ReservedHandle) -> bool { + other.0.get() == *self + } +} + +impl ReservedHandle { + pub const fn new(kind: u8, offset: u32) -> ReservedHandle { + ReservedHandle(new_u32_be((kind as u32) << 24 | offset)) + } +} + +pub const TPM20_HT_NV_INDEX: u8 = 0x01; +pub const TPM20_HT_PERMANENT: u8 = 0x40; +pub const TPM20_HT_PERSISTENT: u8 = 0x81; + +pub const TPM20_RH_OWNER: ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x01); +pub const TPM20_RH_PLATFORM: ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x0c); +pub const TPM20_RH_ENDORSEMENT: ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x0b); +// `TPM_RS_PW` (not `TPM_RH_PW`) +// See Table 28, Section 7.4, "Trusted Platform Module Library Part 2: Structures", revision 1.38. 
+pub const TPM20_RS_PW: ReservedHandle = ReservedHandle::new(TPM20_HT_PERMANENT, 0x09); + +// Based on Section 2.2, "Registry of Reserved TPM 2.0 Handles and Localities", version 1.1. +pub const NV_INDEX_RANGE_BASE_PLATFORM_MANUFACTURER: u32 = + (TPM20_HT_NV_INDEX as u32) << 24 | 0x400000; +pub const NV_INDEX_RANGE_BASE_TCG_ASSIGNED: u32 = (TPM20_HT_NV_INDEX as u32) << 24 | 0xc00000; + +// The suggested minimal size for the buffer in `TPM2B_MAX_BUFFER`. +// See Table 79, Section 10.4.8, "Trusted Platform Module Library Part 2: Structures", revision 1.38. +pub const MAX_DIGEST_BUFFER_SIZE: usize = 1024; + +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)] +pub struct SessionTag(pub u16_be); + +impl PartialEq for u16 { + fn eq(&self, other: &SessionTag) -> bool { + other.0.get() == *self + } +} + +impl SessionTag { + const fn new(val: u16) -> SessionTag { + SessionTag(new_u16_be(val)) + } +} + +#[derive(Error, Debug)] +pub enum TpmCommandError { + #[error("failed to execute the TPM command")] + TpmExecuteCommand, + #[error("invalid response from the TPM command")] + InvalidResponse(#[source] ResponseValidationError), + #[error("invalid input parameter for the TPM command")] + InvalidInputParameter(#[source] TpmProtoError), + #[error("TPM command failed, response code: {response_code:#x}")] + TpmCommandFailed { response_code: u32 }, + #[error("failed to create the TPM command struct")] + TpmCommandCreationFailed(#[source] TpmProtoError), +} + +#[derive(Debug, Copy, Clone)] +#[repr(u16)] +pub enum SessionTagEnum { + // No structure type specified + Null = 0x8000, + + // A command/response for a command defined in this specification. The + // command/response has no attached sessions. If a command has an + // error and the command tag value is either TPM_ST_NO_SESSIONS or + // TPM_ST_SESSIONS, then this tag value is used for the response code. 
+ NoSessions = 0x8001, + + // A command/response for a command defined in this specification. The + // command/response has one or more attached sessions and the sessionOffset + // field is present. + Sessions = 0x8002, + AttestClock = 0x8014, + AttestCommandAudit = 0x8015, + AttestSessionAudit = 0x8016, + AttestCertify = 0x8017, + AttestQuote = 0x8018, + AttestTick = 0x8019, + AttestTickstamp = 0x801A, + AttestTransport = 0x801B, + AttestCreation = 0x801C, + AttestNv = 0x801D, + // Tickets + Creation = 0x8021, + Verified = 0x8022, + Auth = 0x8023, + Hashcheck = 0x8024, + + // Structure describing a Field Upgrade Policy + FuManifest = 0x8029, +} + +impl From for SessionTag { + fn from(x: SessionTagEnum) -> Self { + SessionTag::new(x as u16) + } +} + +impl SessionTagEnum { + pub fn from_u16(val: u16) -> Option { + let ret = match val { + 0x8000 => Self::Null, + 0x8001 => Self::NoSessions, + 0x8002 => Self::Sessions, + 0x8014 => Self::AttestClock, + 0x8015 => Self::AttestCommandAudit, + 0x8016 => Self::AttestSessionAudit, + 0x8017 => Self::AttestCertify, + 0x8018 => Self::AttestQuote, + 0x8019 => Self::AttestTick, + 0x801A => Self::AttestTickstamp, + 0x801B => Self::AttestTransport, + 0x801C => Self::AttestCreation, + 0x801D => Self::AttestNv, + 0x8021 => Self::Creation, + 0x8022 => Self::Verified, + 0x8023 => Self::Auth, + 0x8024 => Self::Hashcheck, + 0x8029 => Self::FuManifest, + _ => return None, + }; + Some(ret) + } +} + +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct CommandCode(pub u32_be); + +impl PartialEq for u32 { + fn eq(&self, other: &CommandCode) -> bool { + other.0.get() == *self + } +} + +impl CommandCode { + const fn new(val: u32) -> CommandCode { + CommandCode(new_u32_be(val)) + } + + pub fn into_enum(self) -> Option { + CommandCodeEnum::from_u32(self.0.get()) + } +} + +#[allow(non_camel_case_types, clippy::upper_case_acronyms)] +#[derive(Debug, Clone, Copy, PartialEq)] 
+#[repr(u32)] +pub enum CommandCodeEnum { + NV_UndefineSpaceSpecial = 0x0000011f, + EvictControl = 0x00000120, + HierarchyControl = 0x00000121, + NV_UndefineSpace = 0x00000122, + ChangeEPS = 0x00000124, + ChangePPS = 0x00000125, + Clear = 0x00000126, + ClearControl = 0x00000127, + ClockSet = 0x00000128, + HierarchyChangeAuth = 0x00000129, + NV_DefineSpace = 0x0000012a, + PCR_Allocate = 0x0000012b, + PCR_SetAuthPolicy = 0x0000012c, + PP_Commands = 0x0000012d, + SetPrimaryPolicy = 0x0000012e, + FieldUpgradeStart = 0x0000012f, + ClockRateAdjust = 0x00000130, + CreatePrimary = 0x00000131, + NV_GlobalWriteLock = 0x00000132, + GetCommandAuditDigest = 0x00000133, + NV_Increment = 0x00000134, + NV_SetBits = 0x00000135, + NV_Extend = 0x00000136, + NV_Write = 0x00000137, + NV_WriteLock = 0x00000138, + DictionaryAttackLockReset = 0x00000139, + DictionaryAttackParameters = 0x0000013a, + NV_ChangeAuth = 0x0000013b, + PCR_Event = 0x0000013c, + PCR_Reset = 0x0000013d, + SequenceComplete = 0x0000013e, + SetAlgorithmSet = 0x0000013f, + SetCommandCodeAuditStatus = 0x00000140, + FieldUpgradeData = 0x00000141, + IncrementalSelfTest = 0x00000142, + SelfTest = 0x00000143, + Startup = 0x00000144, + Shutdown = 0x00000145, + StirRandom = 0x00000146, + ActivateCredential = 0x00000147, + Certify = 0x00000148, + PolicyNV = 0x00000149, + CertifyCreation = 0x0000014a, + Duplicate = 0x0000014b, + GetTime = 0x0000014c, + GetSessionAuditDigest = 0x0000014d, + NV_Read = 0x0000014e, + NV_ReadLock = 0x0000014f, + ObjectChangeAuth = 0x00000150, + PolicySecret = 0x00000151, + Rewrap = 0x00000152, + Create = 0x00000153, + ECDH_ZGen = 0x00000154, + HMAC = 0x00000155, + Import = 0x00000156, + Load = 0x00000157, + Quote = 0x00000158, + RSA_Decrypt = 0x00000159, + HMAC_Start = 0x0000015b, + SequenceUpdate = 0x0000015c, + Sign = 0x0000015d, + Unseal = 0x0000015e, + PolicySigned = 0x00000160, + ContextLoad = 0x00000161, + ContextSave = 0x00000162, + ECDH_KeyGen = 0x00000163, + EncryptDecrypt = 0x00000164, + 
FlushContext = 0x00000165, + LoadExternal = 0x00000167, + MakeCredential = 0x00000168, + NV_ReadPublic = 0x00000169, + PolicyAuthorize = 0x0000016a, + PolicyAuthValue = 0x0000016b, + PolicyCommandCode = 0x0000016c, + PolicyCounterTimer = 0x0000016d, + PolicyCpHash = 0x0000016e, + PolicyLocality = 0x0000016f, + PolicyNameHash = 0x00000170, + PolicyOR = 0x00000171, + PolicyTicket = 0x00000172, + ReadPublic = 0x00000173, + RSA_Encrypt = 0x00000174, + StartAuthSession = 0x00000176, + VerifySignature = 0x00000177, + ECC_Parameters = 0x00000178, + FirmwareRead = 0x00000179, + GetCapability = 0x0000017a, + GetRandom = 0x0000017b, + GetTestResult = 0x0000017c, + Hash = 0x0000017d, + PCR_Read = 0x0000017e, + PolicyPCR = 0x0000017f, + PolicyRestart = 0x00000180, + ReadClock = 0x00000181, + PCR_Extend = 0x00000182, + PCR_SetAuthValue = 0x00000183, + NV_Certify = 0x00000184, + EventSequenceComplete = 0x00000185, + HashSequenceStart = 0x00000186, + PolicyPhysicalPresence = 0x00000187, + PolicyDuplicationSelect = 0x00000188, + PolicyGetDigest = 0x00000189, + TestParms = 0x0000018a, + Commit = 0x0000018b, + PolicyPassword = 0x0000018c, + ZGen_2Phase = 0x0000018d, + EC_Ephemeral = 0x0000018e, + PolicyNvWritten = 0x0000018f, + PolicyTemplate = 0x00000190, + CreateLoaded = 0x00000191, + PolicyAuthorizeNV = 0x00000192, + EncryptDecrypt2 = 0x00000193, + AC_GetCapability = 0x00000194, + AC_Send = 0x00000195, + Policy_AC_SendSelect = 0x00000196, + CertifyX509 = 0x00000197, + ACT_SetTimeout = 0x00000198, +} + +impl From for CommandCode { + fn from(x: CommandCodeEnum) -> Self { + CommandCode::new(x as u32) + } +} + +impl CommandCodeEnum { + pub fn from_u32(val: u32) -> Option { + let ret = match val { + 0x0000011f => Self::NV_UndefineSpaceSpecial, + 0x00000120 => Self::EvictControl, + 0x00000121 => Self::HierarchyControl, + 0x00000122 => Self::NV_UndefineSpace, + 0x00000124 => Self::ChangeEPS, + 0x00000125 => Self::ChangePPS, + 0x00000126 => Self::Clear, + 0x00000127 => 
Self::ClearControl, + 0x00000128 => Self::ClockSet, + 0x00000129 => Self::HierarchyChangeAuth, + 0x0000012a => Self::NV_DefineSpace, + 0x0000012b => Self::PCR_Allocate, + 0x0000012c => Self::PCR_SetAuthPolicy, + 0x0000012d => Self::PP_Commands, + 0x0000012e => Self::SetPrimaryPolicy, + 0x0000012f => Self::FieldUpgradeStart, + 0x00000130 => Self::ClockRateAdjust, + 0x00000131 => Self::CreatePrimary, + 0x00000132 => Self::NV_GlobalWriteLock, + 0x00000133 => Self::GetCommandAuditDigest, + 0x00000134 => Self::NV_Increment, + 0x00000135 => Self::NV_SetBits, + 0x00000136 => Self::NV_Extend, + 0x00000137 => Self::NV_Write, + 0x00000138 => Self::NV_WriteLock, + 0x00000139 => Self::DictionaryAttackLockReset, + 0x0000013a => Self::DictionaryAttackParameters, + 0x0000013b => Self::NV_ChangeAuth, + 0x0000013c => Self::PCR_Event, + 0x0000013d => Self::PCR_Reset, + 0x0000013e => Self::SequenceComplete, + 0x0000013f => Self::SetAlgorithmSet, + 0x00000140 => Self::SetCommandCodeAuditStatus, + 0x00000141 => Self::FieldUpgradeData, + 0x00000142 => Self::IncrementalSelfTest, + 0x00000143 => Self::SelfTest, + 0x00000144 => Self::Startup, + 0x00000145 => Self::Shutdown, + 0x00000146 => Self::StirRandom, + 0x00000147 => Self::ActivateCredential, + 0x00000148 => Self::Certify, + 0x00000149 => Self::PolicyNV, + 0x0000014a => Self::CertifyCreation, + 0x0000014b => Self::Duplicate, + 0x0000014c => Self::GetTime, + 0x0000014d => Self::GetSessionAuditDigest, + 0x0000014e => Self::NV_Read, + 0x0000014f => Self::NV_ReadLock, + 0x00000150 => Self::ObjectChangeAuth, + 0x00000151 => Self::PolicySecret, + 0x00000152 => Self::Rewrap, + 0x00000153 => Self::Create, + 0x00000154 => Self::ECDH_ZGen, + 0x00000155 => Self::HMAC, + 0x00000156 => Self::Import, + 0x00000157 => Self::Load, + 0x00000158 => Self::Quote, + 0x00000159 => Self::RSA_Decrypt, + 0x0000015b => Self::HMAC_Start, + 0x0000015c => Self::SequenceUpdate, + 0x0000015d => Self::Sign, + 0x0000015e => Self::Unseal, + 0x00000160 => 
Self::PolicySigned, + 0x00000161 => Self::ContextLoad, + 0x00000162 => Self::ContextSave, + 0x00000163 => Self::ECDH_KeyGen, + 0x00000164 => Self::EncryptDecrypt, + 0x00000165 => Self::FlushContext, + 0x00000167 => Self::LoadExternal, + 0x00000168 => Self::MakeCredential, + 0x00000169 => Self::NV_ReadPublic, + 0x0000016a => Self::PolicyAuthorize, + 0x0000016b => Self::PolicyAuthValue, + 0x0000016c => Self::PolicyCommandCode, + 0x0000016d => Self::PolicyCounterTimer, + 0x0000016e => Self::PolicyCpHash, + 0x0000016f => Self::PolicyLocality, + 0x00000170 => Self::PolicyNameHash, + 0x00000171 => Self::PolicyOR, + 0x00000172 => Self::PolicyTicket, + 0x00000173 => Self::ReadPublic, + 0x00000174 => Self::RSA_Encrypt, + 0x00000176 => Self::StartAuthSession, + 0x00000177 => Self::VerifySignature, + 0x00000178 => Self::ECC_Parameters, + 0x00000179 => Self::FirmwareRead, + 0x0000017a => Self::GetCapability, + 0x0000017b => Self::GetRandom, + 0x0000017c => Self::GetTestResult, + 0x0000017d => Self::Hash, + 0x0000017e => Self::PCR_Read, + 0x0000017f => Self::PolicyPCR, + 0x00000180 => Self::PolicyRestart, + 0x00000181 => Self::ReadClock, + 0x00000182 => Self::PCR_Extend, + 0x00000183 => Self::PCR_SetAuthValue, + 0x00000184 => Self::NV_Certify, + 0x00000185 => Self::EventSequenceComplete, + 0x00000186 => Self::HashSequenceStart, + 0x00000187 => Self::PolicyPhysicalPresence, + 0x00000188 => Self::PolicyDuplicationSelect, + 0x00000189 => Self::PolicyGetDigest, + 0x0000018a => Self::TestParms, + 0x0000018b => Self::Commit, + 0x0000018c => Self::PolicyPassword, + 0x0000018d => Self::ZGen_2Phase, + 0x0000018e => Self::EC_Ephemeral, + 0x0000018f => Self::PolicyNvWritten, + 0x00000190 => Self::PolicyTemplate, + 0x00000191 => Self::CreateLoaded, + 0x00000192 => Self::PolicyAuthorizeNV, + 0x00000193 => Self::EncryptDecrypt2, + 0x00000194 => Self::AC_GetCapability, + 0x00000195 => Self::AC_Send, + 0x00000196 => Self::Policy_AC_SendSelect, + 0x00000197 => Self::CertifyX509, + 0x00000198 => 
Self::ACT_SetTimeout, + _ => return None, + }; + + Some(ret) + } +} + +const FLAG_FMT1: u32 = 0x0080; +const FLAG_VER1: u32 = 0x0100; +const FLAG_WARN: u32 = 0x0800 + FLAG_VER1; + +#[repr(u32)] +pub enum ResponseCode { + Success = 0x000, + /// The given handle value is not valid or cannot be used for this + /// command. + Value = FLAG_FMT1 + 0x004, + /// Hierarchy is not enabled or is not correct for the use. + Hierarchy = FLAG_FMT1 + 0x0005, + /// The handle is not correct for the use. + Handle = FLAG_FMT1 + 0x000B, + /// The authorization HMAC check failed. + AuthFail = FLAG_FMT1 + 0x000E, + /// Structure is the wrong size. + Size = FLAG_FMT1 + 0x0015, + /// The TPM was unable to unmarshal a value because there were not + /// enough bytes in the input buffer. + Insufficient = FLAG_FMT1 + 0x001A, + /// Integrity check fail. + Integrity = FLAG_FMT1 + 0x001F, + /// TPM is in failure mode. + Failure = FLAG_VER1 + 0x0001, + /// Use of an authorization session with a context command. + AuthContext = FLAG_VER1 + 0x0045, + /// The NV index is used before being initialized or the state saved by + /// TPM20_CC_Shutdown could not be restored. + NvUninitialized = FLAG_VER1 + 0x04A, + /// ... + Sensitive = FLAG_VER1 + 0x055, + /// Gap for session context ID is too large. + ContextGap = FLAG_WARN + 0x001, + /// Out of memory for object contexts. + ObjectMemory = FLAG_WARN + 0x002, + /// Out of memory for session contexts. + SessionMemory = FLAG_WARN + 0x003, + /// Out of shared object/session memory or need space for internal + /// operations. + Memory = FLAG_WARN + 0x004, + /// Out of session handles - a session must be flushed before a new + /// session may be created. + SessionHandles = FLAG_WARN + 0x005, + /// Out of object handles - the handle space for objects is depleted and + /// a reboot is required . + /// NOTE:This cannot occur on the reference implementation. + ObjectHandles = FLAG_WARN + 0x006, + /// The TPM has suspended operation on the command. 
Forward progress was + /// made and the command may be retried. + Yielded = FLAG_WARN + 0x008, + /// The command was cancelled. The command may be retried. + Cancelled = FLAG_WARN + 0x009, + /// TPM is performing self tests. + Testing = FLAG_WARN + 0x00A, + /// The TPM is rate-limiting accesses to prevent wearout of NV. + NvRate = FLAG_WARN + 0x020, + /// Commands are not being accepted because the TPM is in DA lockout + /// mode. + Lockout = FLAG_WARN + 0x021, + /// The TPM was not able to start the command. Retry might work. + Retry = FLAG_WARN + 0x022, + /// The command may require writing of NV and NV is not current + /// accessible. + NvUnavailable = FLAG_WARN + 0x023, + /// This value is reserved and shall not be returned by the TPM. + NotUsed = FLAG_WARN + 0x07F, + /// Add to a parameter-, handle-, or session-related error. + Rc1 = 0x100, +} + +impl ResponseCode { + pub fn from_u32(val: u32) -> Option { + let ret = match val { + x if x == ResponseCode::Success as u32 => ResponseCode::Success, + x if x == ResponseCode::Value as u32 => ResponseCode::Value, + x if x == ResponseCode::Hierarchy as u32 => ResponseCode::Hierarchy, + x if x == ResponseCode::Handle as u32 => ResponseCode::Handle, + x if x == ResponseCode::AuthFail as u32 => ResponseCode::AuthFail, + x if x == ResponseCode::Size as u32 => ResponseCode::Size, + x if x == ResponseCode::Insufficient as u32 => ResponseCode::Insufficient, + x if x == ResponseCode::Integrity as u32 => ResponseCode::Integrity, + x if x == ResponseCode::Failure as u32 => ResponseCode::Failure, + x if x == ResponseCode::AuthContext as u32 => ResponseCode::AuthContext, + x if x == ResponseCode::NvUninitialized as u32 => ResponseCode::NvUninitialized, + x if x == ResponseCode::Sensitive as u32 => ResponseCode::Sensitive, + x if x == ResponseCode::ContextGap as u32 => ResponseCode::ContextGap, + x if x == ResponseCode::ObjectMemory as u32 => ResponseCode::ObjectMemory, + x if x == ResponseCode::SessionMemory as u32 => 
ResponseCode::SessionMemory, + x if x == ResponseCode::Memory as u32 => ResponseCode::Memory, + x if x == ResponseCode::SessionHandles as u32 => ResponseCode::SessionHandles, + x if x == ResponseCode::ObjectHandles as u32 => ResponseCode::ObjectHandles, + x if x == ResponseCode::Yielded as u32 => ResponseCode::Yielded, + x if x == ResponseCode::Cancelled as u32 => ResponseCode::Cancelled, + x if x == ResponseCode::Testing as u32 => ResponseCode::Testing, + x if x == ResponseCode::NvRate as u32 => ResponseCode::NvRate, + x if x == ResponseCode::Lockout as u32 => ResponseCode::Lockout, + x if x == ResponseCode::Retry as u32 => ResponseCode::Retry, + x if x == ResponseCode::NvUnavailable as u32 => ResponseCode::NvUnavailable, + x if x == ResponseCode::NotUsed as u32 => ResponseCode::NotUsed, + _ => return None, + }; + Some(ret) + } +} + +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct AlgId(pub u16_be); + +impl PartialEq for u16 { + fn eq(&self, other: &AlgId) -> bool { + other.0.get() == *self + } +} + +impl AlgId { + const fn new(val: u16) -> AlgId { + AlgId(new_u16_be(val)) + } +} + +#[allow(non_camel_case_types, clippy::upper_case_acronyms)] +#[derive(Debug)] +#[repr(u16)] +pub enum AlgIdEnum { + RSA = 0x0001, + SHA = 0x0004, + AES = 0x0006, + SHA256 = 0x000b, + SHA384 = 0x000c, + SHA512 = 0x000d, + NULL = 0x0010, + SM3_256 = 0x0012, + RSASSA = 0x0014, + CFB = 0x0043, +} + +impl From for AlgId { + fn from(x: AlgIdEnum) -> Self { + AlgId::new(x as u16) + } +} + +impl AlgIdEnum { + pub fn from_u16(val: u16) -> Option { + let ret = match val { + 0x0004 => Self::SHA, + 0x000b => Self::SHA256, + 0x000c => Self::SHA384, + 0x000d => Self::SHA512, + 0x0012 => Self::SM3_256, + _ => return None, + }; + + Some(ret) + } +} + +/// `TPMA_OBJECT` +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct TpmaObject(pub u32_be); + +impl 
TpmaObject { + const fn new(val: u32) -> Self { + Self(new_u32_be(val)) + } +} + +impl From for TpmaObject { + fn from(x: TpmaObjectBits) -> Self { + let val: u32 = x.into(); + Self::new(val) + } +} + +impl From for TpmaObject { + fn from(x: u32) -> Self { + Self::new(x) + } +} + +#[bitfield(u32)] +pub struct TpmaObjectBits { + _reserved0: bool, + pub fixed_tpm: bool, + pub st_clear: bool, + _reserved1: bool, + pub fixed_parent: bool, + pub sensitive_data_origin: bool, + pub user_with_auth: bool, + pub admin_with_policy: bool, + #[bits(2)] + _reserved2: u8, + pub no_da: bool, + pub encrypted_duplication: bool, + #[bits(4)] + _reserved3: u8, + pub restricted: bool, + pub decrypt: bool, + pub sign_encrypt: bool, + #[bits(13)] + _reserved4: u16, +} + +/// `TPMA_NV` +#[repr(transparent)] +#[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes, PartialEq)] +pub struct TpmaNv(pub u32_be); + +impl TpmaNv { + const fn new(val: u32) -> Self { + Self(new_u32_be(val)) + } +} + +impl From for TpmaNv { + fn from(x: TpmaNvBits) -> Self { + let val: u32 = x.into(); + Self::new(val) + } +} + +impl From for TpmaNv { + fn from(x: u32) -> Self { + Self::new(x) + } +} + +#[bitfield(u32)] +pub struct TpmaNvBits { + pub nv_ppwrite: bool, + pub nv_ownerwrite: bool, + pub nv_authwrite: bool, + pub nv_policywrite: bool, + // bits 7:4: `TPM_NT` + // 0001 - `tpm_nt_counter` + pub nt_counter: bool, + // 0010 - `tpm_nt_bits` + pub nt_bits: bool, + // 0100 - `tpm_nt_extend` + pub nt_extend: bool, + _unused0: bool, + // bits 9:8 are reserved + #[bits(2)] + _reserved1: u8, + pub nv_policy_delete: bool, + pub nv_writelocked: bool, + pub nv_writeall: bool, + pub nv_writedefine: bool, + pub nv_write_stclear: bool, + pub nv_globallock: bool, + pub nv_ppread: bool, + pub nv_ownerread: bool, + pub nv_authread: bool, + pub nv_policyread: bool, + // bits 24:20 are reserved + #[bits(5)] + _reserved2: u8, + pub nv_no_da: bool, + pub nv_orderly: bool, + pub nv_clear_stclear: bool, + pub 
nv_readlocked: bool, + pub nv_written: bool, + pub nv_platformcreate: bool, + pub nv_read_stclear: bool, +} + +/// Workaround to allow constructing a zerocopy U64 in a const context. +const fn new_u64_be(val: u64) -> u64_be { + u64_be::from_bytes(val.to_be_bytes()) +} + +/// Workaround to allow constructing a zerocopy U32 in a const context. +const fn new_u32_be(val: u32) -> u32_be { + u32_be::from_bytes(val.to_be_bytes()) +} + +/// Workaround to allow constructing a zerocopy U16 in a const context. +const fn new_u16_be(val: u16) -> u16_be { + u16_be::from_bytes(val.to_be_bytes()) +} + +/// TPM command / response definitions +pub mod protocol { + use super::*; + + /// Common structs shared between multiple command / response structs + pub mod common { + use super::*; + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct CmdHeader { + pub session_tag: SessionTag, + pub size: u32_be, + pub command_code: CommandCode, + } + + impl CmdHeader { + /// Construct a header for a fixed-size command + pub fn new( + session_tag: SessionTag, + command_code: CommandCode, + ) -> CmdHeader { + CmdHeader { + session_tag, + size: (size_of::() as u32).into(), + command_code, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ReplyHeader { + pub session_tag: u16_be, + pub size: u32_be, + pub response_code: u32_be, + } + + impl ReplyHeader { + /// Performs a few command-agnostic validation checks: + /// - Ensures the size matches the size_of the provided `FullReply` type + /// - Compares provided session_tag + /// + /// Returns Ok(bool) if the validation passes. The bool value indicates whether + /// the response_code is [`ResponseCode::Success`] or not. + /// Returns Err(ResponseValidationError) otherwise. 
+ pub fn base_validation( + &self, + session_tag: SessionTag, + expected_size: u32, + ) -> Result { + // Response code other than Success indicates that the command fails + // See Section 6.2, "Trusted Platform Module Library Part 3: Commands", revision 1.38. + let command_succeeded = ResponseCode::from_u32(self.response_code.get()) + .map(|c| matches!(c, ResponseCode::Success)) + .unwrap_or(false); + + let (expected_tag, expected_size) = if command_succeeded { + (session_tag, expected_size as usize) + } else { + // If the command fails, the expected tag should be NoSessions and the minimal size + // of the response should be the size of the header. + // See Section 6.1, "Trusted Platform Module Library Part 3: Commands", revision 1.38. + // + // DEVNOTE: we do not handle the special case caused by sending unsupported commands where + // the session tag will be `TPM_RC_BAD_TAG` instead. + (SessionTagEnum::NoSessions.into(), size_of::()) + }; + + if self.session_tag.get() != expected_tag { + Err(ResponseValidationError::HeaderSessionTagMismatch { + response_session_tag: self.session_tag.get(), + expected_session_tag: session_tag.0.get(), + command_succeeded, + })? + } + + // Allow the size specified in the header to be equal to or larger than the expected size in case + // that the expected size does not take the authorization area into account. + if (self.size.get() as usize) < expected_size { + Err(ResponseValidationError::HeaderResponseSizeMismatch { + size: self.size.get(), + expected_size, + command_succeeded, + })? 
+ } + + Ok(command_succeeded) + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct CmdAuth { + handle: ReservedHandle, + nonce_2b: u16_be, + session: u8, + auth_2b: u16_be, + } + + impl CmdAuth { + pub fn new(handle: ReservedHandle, nonce_2b: u16, session: u8, auth_2b: u16) -> Self { + CmdAuth { + handle, + nonce_2b: nonce_2b.into(), + session, + auth_2b: auth_2b.into(), + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ReplyAuth { + pub nonce_2b: u16_be, + pub session: u8, + pub auth_2b: u16_be, + } + } + + use common::CmdHeader; + use common::ReplyHeader; + + /// Marker trait for a struct that corresponds to a TPM Command + pub trait TpmCommand: IntoBytes + FromBytes + Sized + Immutable + KnownLayout { + type Reply: TpmReply; + + fn base_validate_reply( + reply_buf: &[u8], + session_tag: impl Into, + ) -> Result<(Self::Reply, bool), ResponseValidationError> { + let res = Self::Reply::deserialize(reply_buf) + .ok_or(ResponseValidationError::ResponseSizeTooSmall)?; + let succeeded = res.base_validation(session_tag.into())?; + + Ok((res, succeeded)) + } + } + + /// Marker trait for a struct that corresponds to a TPM Reply + pub trait TpmReply: IntoBytes + FromBytes + Sized + Immutable + KnownLayout { + type Command: TpmCommand; + + fn base_validation( + &self, + session_tag: SessionTag, + ) -> Result { + // `Reply::deserialize` guarantees this should not fail + let header = ReplyHeader::ref_from_prefix(self.as_bytes()) + .expect("unexpected response size") + .0; // TODO: zerocopy: error (https://github.com/microsoft/openvmm/issues/759) + header.base_validation(session_tag, self.payload_size() as u32) + } + fn deserialize(bytes: &[u8]) -> Option; + fn payload_size(&self) -> usize; + } + + /// General type for TPM 2.0 sized buffers. 
+ #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bBuffer { + pub size: u16_be, + // Use value that is large enough as the buffer size so that we + // only need to define one struct. + pub buffer: [u8; MAX_DIGEST_BUFFER_SIZE], + } + + impl Tpm2bBuffer { + /// Create a `Tpm2bBuffer` from a slice. + pub fn new(data: &[u8]) -> Result { + let size = data.len(); + if size > MAX_DIGEST_BUFFER_SIZE { + Err(InvalidInput::BufferSizeTooLarge( + size, + MAX_DIGEST_BUFFER_SIZE, + ))? + } + + let mut buffer = [0u8; MAX_DIGEST_BUFFER_SIZE]; + buffer[..size].copy_from_slice(data); + + Ok(Self { + size: new_u16_be(size as u16), + buffer, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.buffer[..self.size.get() as usize]); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + + let size: u16 = u16_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + if size as usize > MAX_DIGEST_BUFFER_SIZE { + return None; + } + + start = end; + end += size as usize; + if bytes.len() < end { + return None; + } + let mut buffer = [0u8; MAX_DIGEST_BUFFER_SIZE]; + buffer[..size as usize].copy_from_slice(&bytes[start..end]); + + Some(Self { + size: size.into(), + buffer, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.size); + payload_size += self.size.get() as usize; + + payload_size + } + } + + /// `TPML_PCR_SELECTION` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmlPcrSelection { + pub count: u32_be, + pub pcr_selections: [PcrSelection; 5], + } + + impl TpmlPcrSelection { + pub fn new(pcr_selections: &[PcrSelection]) -> 
Result { + let count = pcr_selections.len(); + if count > 5 { + Err(InvalidInput::PcrSelectionsLengthTooLong(count, 5))? + } + + let mut base = [PcrSelection::new_zeroed(); 5]; + base[..count].copy_from_slice(pcr_selections); + + Ok(Self { + count: new_u32_be(count as u32), + pcr_selections: base, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.count.as_bytes()); + for i in 0..self.count.get() { + buffer.extend_from_slice(&self.pcr_selections[i as usize].serialize()); + } + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + if bytes.len() < end { + return None; + } + + let count: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + if count > 5 { + return None; + } + + let mut pcr_selections = [PcrSelection::new_zeroed(); 5]; + for i in 0..count { + start = end; + pcr_selections[i as usize] = PcrSelection::deserialize(&bytes[start..])?; + end += pcr_selections[i as usize].payload_size(); + } + + Some(Self { + count: count.into(), + pcr_selections, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + let count = self.count; + + payload_size += size_of_val(&count); + for i in 0..count.get() { + payload_size += self.pcr_selections[i as usize].payload_size(); + } + + payload_size + } + } + + /// `TPMS_SENSITIVE_CREATE` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmsSensitiveCreate { + user_auth: Tpm2bBuffer, + data: Tpm2bBuffer, + } + + impl TpmsSensitiveCreate { + pub fn new(user_auth: &[u8], data: &[u8]) -> Result { + let user_auth = + Tpm2bBuffer::new(user_auth).map_err(TpmProtoError::TpmsSensitiveCreateUserAuth)?; + let data = Tpm2bBuffer::new(data).map_err(TpmProtoError::TpmsSensitiveCreateData)?; + Ok(Self { user_auth, data }) + } + + pub fn serialize(self) -> 
Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(&self.user_auth.serialize()); + buffer.extend_from_slice(&self.data.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += self.user_auth.payload_size(); + payload_size += self.data.payload_size(); + + payload_size + } + } + + /// `TPM2B_SENSITIVE_CREATE` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bSensitiveCreate { + size: u16_be, + sensitive: TpmsSensitiveCreate, + } + + impl Tpm2bSensitiveCreate { + pub fn new(sensitive: TpmsSensitiveCreate) -> Self { + let size = sensitive.payload_size() as u16; + Self { + size: size.into(), + sensitive, + } + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.sensitive.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + let size = self.size; + + payload_size += size_of_val(&size); + payload_size += self.sensitive.payload_size(); + + payload_size + } + } + + /// `TPMT_RSA_SCHEME` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout, PartialEq)] + pub struct TpmtRsaScheme { + scheme: AlgId, + hash_alg: AlgId, + } + + impl TpmtRsaScheme { + pub fn new(scheme: AlgId, hash_alg: Option) -> Self { + let hash_alg = hash_alg.map_or_else(|| AlgId::new(0), |v| v); + + Self { scheme, hash_alg } + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.scheme.as_bytes()); + + // No parameters when algorithm is NULL + if self.scheme != AlgIdEnum::NULL.into() { + // Only support scheme with hash (e.g., RSASSA) for now + buffer.extend_from_slice(self.hash_alg.as_bytes()); + } + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + if bytes.len() < end { + 
return None; + } + + let scheme = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + let hash_alg = if scheme != AlgIdEnum::NULL.into() { + start = end; + end += size_of::(); + AlgId::read_from_prefix(&bytes[start..end]).ok()?.0 // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + } else { + AlgId::new(0) + }; + + Some(Self { scheme, hash_alg }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.scheme); + + if self.scheme != AlgIdEnum::NULL.into() { + payload_size += size_of_val(&self.hash_alg); + } + + payload_size + } + } + + /// `TPMT_SYM_DEF_OBJECT` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout, PartialEq)] + pub struct TpmtSymDefObject { + algorithm: AlgId, + key_bits: u16_be, + mode: AlgId, + } + + impl TpmtSymDefObject { + pub fn new(algorithm: AlgId, key_bits: Option, mode: Option) -> Self { + let key_bits = key_bits.map_or_else(|| new_u16_be(0), |v| v.into()); + let mode = mode.map_or_else(|| AlgId::new(0), |v| v); + + Self { + algorithm, + key_bits, + mode, + } + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.algorithm.as_bytes()); + + // No parameters when algorithm is NULL + if self.algorithm != AlgIdEnum::NULL.into() { + buffer.extend_from_slice(self.key_bits.as_bytes()); + buffer.extend_from_slice(self.mode.as_bytes()); + } + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + if bytes.len() < end { + return None; + } + + let algorithm = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + let (key_bits, mode) = if algorithm != AlgIdEnum::NULL.into() { + start = 
end; + end += size_of::(); + let key_bits = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + let mode = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + (key_bits, mode) + } else { + (new_u16_be(0), AlgId::new(0)) + }; + + Some(Self { + algorithm, + key_bits, + mode, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.algorithm); + + if self.algorithm != AlgIdEnum::NULL.into() { + payload_size += size_of_val(&self.key_bits); + payload_size += size_of_val(&self.mode); + } + + payload_size + } + } + + /// `TPMS_RSA_PARMS` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout, PartialEq)] + pub struct TpmsRsaParams { + symmetric: TpmtSymDefObject, + scheme: TpmtRsaScheme, + key_bits: u16_be, + pub exponent: u32_be, + } + + impl TpmsRsaParams { + pub fn new( + symmetric: TpmtSymDefObject, + scheme: TpmtRsaScheme, + key_bits: u16, + exponent: u32, + ) -> Self { + Self { + symmetric, + scheme, + key_bits: key_bits.into(), + exponent: exponent.into(), + } + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(&self.symmetric.serialize()); + buffer.extend_from_slice(&self.scheme.serialize()); + buffer.extend_from_slice(self.key_bits.as_bytes()); + buffer.extend_from_slice(self.exponent.as_bytes()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = 0; + + let symmetric = TpmtSymDefObject::deserialize(&bytes[start..])?; + end += symmetric.payload_size(); + + start = end; + let scheme = TpmtRsaScheme::deserialize(&bytes[start..])?; + end += scheme.payload_size(); + + // TODO: zerocopy: as of zerocopy 0.8 this can be simplified with 
`read_from_bytes`....ok()?, to avoid (https://github.com/microsoft/openvmm/issues/759) + // manual size checks. Leaving this code as-is to reduce risk of the 0.7 -> 0.8 move. + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let key_bits = u16_be::read_from_bytes(&bytes[start..end]).ok()?; + + // TODO: zerocopy: as of zerocopy 0.8 this can be simplified with `read_from_bytes`....ok()?, to avoid (https://github.com/microsoft/openvmm/issues/759) + // manual size checks. Leaving this code as-is to reduce risk of the 0.7 -> 0.8 move. + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let exponent = u32_be::read_from_bytes(&bytes[start..end]).ok()?; + + Some(Self { + symmetric, + scheme, + key_bits, + exponent, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += self.symmetric.payload_size(); + payload_size += self.scheme.payload_size(); + payload_size += size_of_val(&self.key_bits); + payload_size += size_of_val(&self.exponent); + + payload_size + } + } + + /// `TPMT_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmtPublic { + my_type: AlgId, + name_alg: AlgId, + object_attributes: TpmaObject, + auth_policy: Tpm2bBuffer, + // `TPMS_RSA_PARAMS` + pub parameters: TpmsRsaParams, + // `TPM2B_PUBLIC_KEY_RSA` + pub unique: Tpm2bBuffer, + } + + impl TpmtPublic { + pub fn new( + my_type: AlgId, + name_alg: AlgId, + object_attributes: TpmaObjectBits, + auth_policy: &[u8], + parameters: TpmsRsaParams, + unique: &[u8], + ) -> Result { + let auth_policy = + Tpm2bBuffer::new(auth_policy).map_err(TpmProtoError::TpmtPublicAuthPolicy)?; + let unique = Tpm2bBuffer::new(unique).map_err(TpmProtoError::TpmtPublicUnique)?; + Ok(Self { + my_type, + name_alg, + object_attributes: object_attributes.into(), + auth_policy, + parameters, + unique, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = 
Vec::new(); + + buffer.extend_from_slice(self.my_type.as_bytes()); + buffer.extend_from_slice(self.name_alg.as_bytes()); + buffer.extend_from_slice(self.object_attributes.as_bytes()); + buffer.extend_from_slice(&self.auth_policy.serialize()); + buffer.extend_from_slice(&self.parameters.serialize()); + buffer.extend_from_slice(&self.unique.serialize()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let r#type = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let name_alg = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let object_attributes: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let auth_policy = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += auth_policy.payload_size(); + if bytes.len() < end { + return None; + } + + start = end; + let parameters = TpmsRsaParams::deserialize(&bytes[start..])?; + end += parameters.payload_size(); + + start = end; + let unique = Tpm2bBuffer::deserialize(&bytes[start..])?; + + Some(Self { + my_type: r#type, + name_alg, + object_attributes: object_attributes.into(), + auth_policy, + parameters, + unique, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.my_type); + payload_size += size_of_val(&self.name_alg); + payload_size += size_of_val(&self.object_attributes); + payload_size += self.auth_policy.payload_size(); + payload_size += 
self.parameters.payload_size(); + payload_size += self.unique.payload_size(); + + payload_size + } + } + + /// `TPM2B_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bPublic { + pub size: u16_be, + pub public_area: TpmtPublic, + } + + impl Tpm2bPublic { + pub fn new(public_area: TpmtPublic) -> Self { + let size = public_area.payload_size() as u16; + Self { + size: size.into(), + public_area, + } + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.public_area.serialize()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let end = size_of::(); + + if bytes.len() < end { + return None; + } + + let size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let public_area = TpmtPublic::deserialize(&bytes[start..])?; + + Some(Self { size, public_area }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.size); + payload_size += self.public_area.payload_size(); + + payload_size + } + } + + /// `TPMS_CREATION_DATA` + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmsCreationData { + pcr_select: TpmlPcrSelection, + pcr_digest: Tpm2bBuffer, + locality: u8, + parent_name_alg: AlgId, + parent_name: Tpm2bBuffer, + parent_qualified_name: Tpm2bBuffer, + outside_info: Tpm2bBuffer, + } + + impl TpmsCreationData { + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = 0; + + let pcr_select = TpmlPcrSelection::deserialize(&bytes[start..])?; + end += pcr_select.payload_size(); + + start = end; + let pcr_digest = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += pcr_digest.payload_size(); + + start = end; + end += size_of::(); + if bytes.len() < end 
{ + return None; + } + let locality = bytes[start]; + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parent_name_alg = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let parent_name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += parent_name.payload_size(); + + start = end; + let parent_qualified_name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += parent_qualified_name.payload_size(); + + start = end; + let outside_info = Tpm2bBuffer::deserialize(&bytes[start..])?; + + Some(Self { + pcr_select, + pcr_digest, + locality, + parent_name_alg, + parent_name, + parent_qualified_name, + outside_info, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += self.pcr_select.payload_size(); + payload_size += self.pcr_digest.payload_size(); + payload_size += size_of_val(&self.locality); + payload_size += size_of_val(&self.parent_name_alg); + payload_size += self.parent_name.payload_size(); + payload_size += self.parent_qualified_name.payload_size(); + payload_size += self.outside_info.payload_size(); + + payload_size + } + } + + /// `TPM2B_CREATION_DATA` + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + #[repr(C)] + pub struct Tpm2bCreationData { + size: u16_be, + creation_data: TpmsCreationData, + } + + impl Tpm2bCreationData { + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let end = size_of::(); + + if bytes.len() < end { + return None; + } + + let size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let creation_data = TpmsCreationData::deserialize(&bytes[start..])?; + + Some(Self { + size, + creation_data, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += 
size_of_val(&self.size); + payload_size += self.creation_data.payload_size(); + + payload_size + } + } + + /// `TPMT_TK_CREATION` + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmtTkCreation { + tag: SessionTag, + hierarchy: ReservedHandle, + digest: Tpm2bBuffer, + } + + impl TpmtTkCreation { + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let tag = SessionTag::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let hierarchy = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let digest = Tpm2bBuffer::deserialize(&bytes[start..])?; + + Some(Self { + tag, + hierarchy, + digest, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.tag); + payload_size += size_of_val(&self.hierarchy); + payload_size += self.digest.payload_size(); + + payload_size + } + } + + /// `TPMS_NV_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct TpmsNvPublic { + nv_index: u32_be, + name_alg: AlgId, + pub attributes: TpmaNv, + auth_policy: Tpm2bBuffer, + pub data_size: u16_be, + } + + impl TpmsNvPublic { + pub fn new( + nv_index: u32, + name_alg: AlgId, + attributes: TpmaNvBits, + auth_policy: &[u8], + data_size: u16, + ) -> Result { + let auth_policy = + Tpm2bBuffer::new(auth_policy).map_err(TpmProtoError::TpmsNvPublicAuthPolicy)?; + + Ok(Self { + nv_index: nv_index.into(), + name_alg, + attributes: attributes.into(), + auth_policy, + data_size: data_size.into(), + }) + } + + pub fn serialize(self) -> Vec { + let mut 
buffer = Vec::new(); + + buffer.extend_from_slice(self.nv_index.as_bytes()); + buffer.extend_from_slice(self.name_alg.as_bytes()); + buffer.extend_from_slice(self.attributes.as_bytes()); + buffer.extend_from_slice(&self.auth_policy.serialize()); + buffer.extend_from_slice(self.data_size.as_bytes()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let nv_index: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let name_alg = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let attributes: u32 = u32_be::read_from_bytes(&bytes[start..end]).ok()?.into(); // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let auth_policy = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += auth_policy.payload_size(); + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let data_size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + Some(Self { + nv_index: nv_index.into(), + name_alg, + attributes: attributes.into(), + auth_policy, + data_size, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.nv_index); + payload_size += size_of_val(&self.name_alg); + payload_size += size_of_val(&self.attributes); + payload_size += self.auth_policy.payload_size(); + payload_size += size_of_val(&self.data_size); + + payload_size + } + } + + /// `TPM2B_NV_PUBLIC` + #[repr(C)] + #[derive(Debug, Copy, 
Clone, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct Tpm2bNvPublic { + size: u16_be, + pub nv_public: TpmsNvPublic, + } + + impl Tpm2bNvPublic { + pub fn new(nv_public: TpmsNvPublic) -> Result { + let size = nv_public.payload_size(); + if size > u16::MAX.into() { + Err(InvalidInput::NvPublicPayloadTooLarge(size, u16::MAX.into()))? + } + + Ok(Self { + size: (size as u16).into(), + nv_public, + }) + } + + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.size.as_bytes()); + buffer.extend_from_slice(&self.nv_public.serialize()); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let end = size_of::(); + + if bytes.len() < end { + return None; + } + + let size = u16_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let nv_public = TpmsNvPublic::deserialize(&bytes[start..])?; + + Some(Self { size, nv_public }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.size); + payload_size += self.nv_public.payload_size(); + + payload_size + } + } + + // === ClearControl === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearControlCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + disable: u8, + } + + impl ClearControlCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + disable: bool, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::ClearControl.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + disable: disable as u8, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearControlReply { + pub header: ReplyHeader, + pub param_size: u32_be, + pub auth: common::ReplyAuth, + } + + 
impl TpmCommand for ClearControlCmd { + type Reply = ClearControlReply; + } + + impl TpmReply for ClearControlReply { + type Command = ClearControlCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Clear === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearCmd { + header: CmdHeader, + + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + } + + impl ClearCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::Clear.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ClearReply { + pub header: ReplyHeader, + pub param_size: u32_be, + pub auth: common::ReplyAuth, + } + + impl TpmCommand for ClearCmd { + type Reply = ClearReply; + } + + impl TpmReply for ClearReply { + type Command = ClearCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Startup === // + + pub enum StartupType { + Clear, + State, + } + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct StartupCmd { + header: CmdHeader, + startup_type: u16_be, + } + + impl StartupCmd { + pub fn new(session_tag: SessionTag, startup_type: StartupType) -> StartupCmd { + StartupCmd { + header: CmdHeader::new::(session_tag, CommandCodeEnum::Startup.into()), + startup_type: match startup_type { + StartupType::Clear => 0, + StartupType::State => 1, + } + .into(), + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct StartupReply { + pub header: ReplyHeader, + } + + impl TpmCommand for StartupCmd { + type Reply = StartupReply; + } + + impl TpmReply for StartupReply { + type Command = StartupCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Self Test === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct SelfTestCmd { + header: CmdHeader, + full_test: u8, + } + + impl SelfTestCmd { + pub fn new(session_tag: SessionTag, full_test: bool) -> SelfTestCmd { + SelfTestCmd { + header: CmdHeader::new::(session_tag, CommandCodeEnum::SelfTest.into()), + full_test: full_test as u8, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct SelfTestReply { + pub header: ReplyHeader, + } + + impl TpmCommand for SelfTestCmd { + type Reply = SelfTestReply; + } + + impl TpmReply for SelfTestReply { + type Command = SelfTestCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Hierarchy Control === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct HierarchyControlCmd { + header: CmdHeader, + + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + + hierarchy: ReservedHandle, + state: u8, + } + + impl HierarchyControlCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + hierarchy: ReservedHandle, + state: bool, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::HierarchyControl.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + hierarchy, + state: state as u8, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct HierarchyControlReply { + pub header: ReplyHeader, + pub param_size: u32_be, + pub auth: common::ReplyAuth, + } + + impl TpmCommand for HierarchyControlCmd { + type Reply = HierarchyControlReply; + } + + impl TpmReply for HierarchyControlReply { + type Command = HierarchyControlCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Pcr Allocate === // + + #[repr(C)] + #[derive(Debug, Copy, Clone, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct PcrSelection { + pub hash: AlgId, + pub size_of_select: u8, + pub bitmap: [u8; 3], + } + + impl PcrSelection { + pub fn serialize(self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.hash.as_bytes()); + buffer.extend_from_slice(self.size_of_select.as_bytes()); + buffer.extend_from_slice(&self.bitmap[..self.size_of_select as usize]); + + buffer + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let hash = AlgId::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let size_of_select = bytes[start]; + if size_of_select > 3 { + return None; + } + + start = end; + end += size_of_select as usize; + if bytes.len() < end { + return None; + } + let mut bitmap = [0u8; 3]; + bitmap[..size_of_select as usize].copy_from_slice(&bytes[start..end]); + + Some(Self { + hash, + size_of_select, + bitmap, + }) + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.hash); + payload_size += size_of_val(&self.size_of_select); + payload_size += self.size_of_select as usize; + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct PcrAllocateCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + pcr_allocation: TpmlPcrSelection, + } + + impl PcrAllocateCmd { + pub const HASH_ALG_TO_ID: [(u32, AlgId); 5] = [ + (1 << 0, 
AlgId::new(AlgIdEnum::SHA as u16)), + (1 << 1, AlgId::new(AlgIdEnum::SHA256 as u16)), + (1 << 2, AlgId::new(AlgIdEnum::SHA384 as u16)), + (1 << 3, AlgId::new(AlgIdEnum::SHA512 as u16)), + (1 << 4, AlgId::new(AlgIdEnum::SM3_256 as u16)), + ]; + + /// # Panics + /// + /// `pcr_selections` must be have a len less than `TCG_BOOT_HASH_COUNT` + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + pcr_selections: &[PcrSelection], + ) -> Result { + let pcr_allocation = TpmlPcrSelection::new(pcr_selections) + .map_err(TpmProtoError::PcrAllocatePcrAllocation)?; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::PCR_Allocate.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + pcr_allocation, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + buffer.extend_from_slice(&self.pcr_allocation.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.pcr_allocation.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct PcrAllocateReply { + pub header: ReplyHeader, + pub auth_size: u32_be, + pub allocation_success: u8, + pub max_pcr: u32_be, + pub size_needed: u32_be, + pub size_available: u32_be, + + pub auth: common::ReplyAuth, + } + + impl TpmCommand for PcrAllocateCmd { + type Reply = PcrAllocateReply; + } + + impl TpmReply for PcrAllocateReply { + type 
Command = PcrAllocateCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === ChangeSeed === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ChangeSeedCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + auth_size: u32_be, + auth: common::CmdAuth, + } + + impl ChangeSeedCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + command_code: CommandCodeEnum, + ) -> Self { + Self { + header: CmdHeader::new::(session, command_code.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + } + } + } + + #[repr(C)] + #[derive(Debug, IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ChangeSeedReply { + pub header: ReplyHeader, + pub param_size: u32_be, + + pub auth: common::ReplyAuth, + } + + impl TpmCommand for ChangeSeedCmd { + type Reply = ChangeSeedReply; + } + + impl TpmReply for ChangeSeedReply { + type Command = ChangeSeedCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: option-to-error (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === CreatePrimary === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct CreatePrimaryCmd { + pub header: CmdHeader, + primary_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + in_sensitive: Tpm2bSensitiveCreate, + in_public: Tpm2bPublic, + outside_info: Tpm2bBuffer, + creation_pcr: TpmlPcrSelection, + } + + impl CreatePrimaryCmd { + pub fn new( + session: SessionTag, + primary_handle: ReservedHandle, + auth: common::CmdAuth, + in_sensitive_user_auth: &[u8], + in_sensitive_data: &[u8], + 
in_public: TpmtPublic, + outside_info: &[u8], + creation_pcr: &[PcrSelection], + ) -> Result { + let sensitive_create = + TpmsSensitiveCreate::new(in_sensitive_user_auth, in_sensitive_data)?; + let in_sensitive = Tpm2bSensitiveCreate::new(sensitive_create); + let in_public = Tpm2bPublic::new(in_public); + let outside_info = + Tpm2bBuffer::new(outside_info).map_err(TpmProtoError::CreatePrimaryOutsideInfo)?; + let creation_pcr = TpmlPcrSelection::new(creation_pcr) + .map_err(TpmProtoError::CreatePrimaryCreationPcr)?; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::CreatePrimary.into()), + primary_handle, + auth_size: (size_of::() as u32).into(), + auth, + in_sensitive, + in_public, + outside_info, + creation_pcr, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.primary_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + buffer.extend_from_slice(&self.in_sensitive.serialize()); + buffer.extend_from_slice(&self.in_public.serialize()); + buffer.extend_from_slice(&self.outside_info.serialize()); + buffer.extend_from_slice(&self.creation_pcr.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.primary_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.in_sensitive.payload_size(); + payload_size += self.in_public.payload_size(); + payload_size += self.outside_info.payload_size(); + payload_size += self.creation_pcr.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct CreatePrimaryReply { + pub header: 
ReplyHeader, + pub object_handle: ReservedHandle, + // Parameter size + param_size: u32_be, + // Parameters + pub out_public: Tpm2bPublic, + creation_data: Tpm2bCreationData, + creation_hash: Tpm2bBuffer, + creation_ticket: TpmtTkCreation, + name: Tpm2bBuffer, + // Authorization area + auth: common::ReplyAuth, + } + + impl TpmCommand for CreatePrimaryCmd { + type Reply = CreatePrimaryReply; + } + + impl TpmReply for CreatePrimaryReply { + type Command = CreatePrimaryCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. + if header.size.get() as usize == end { + let mut cmd = CreatePrimaryReply::new_zeroed(); + cmd.header = header; + return Some(cmd); + } + + start = end; + end += size_of::(); + let object_handle = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + let param_size = u32_be::read_from_bytes(&bytes[start..end]).ok()?; // TODO: zerocopy: simplify (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let out_public = Tpm2bPublic::deserialize(&bytes[start..])?; + end += out_public.payload_size(); + + start = end; + let creation_data = Tpm2bCreationData::deserialize(&bytes[start..])?; + end += creation_data.payload_size(); + + start = end; + let creation_hash = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += creation_hash.payload_size(); + + start = end; + let creation_ticket = TpmtTkCreation::deserialize(&bytes[start..])?; + end += creation_ticket.payload_size(); + + start = end; + let name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += name.payload_size(); + + start = end; + end += size_of::(); + let auth = 
common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? + .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + object_handle, + param_size, + out_public, + creation_data, + creation_hash, + creation_ticket, + name, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.object_handle); + payload_size += size_of_val(&self.param_size); + payload_size += self.out_public.payload_size(); + payload_size += self.creation_data.payload_size(); + payload_size += self.creation_hash.payload_size(); + payload_size += self.creation_ticket.payload_size(); + payload_size += self.name.payload_size(); + payload_size += size_of_val(&self.auth); + + payload_size + } + } + + // === FlushContext === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct FlushContextCmd { + pub header: CmdHeader, + // Parameter + flush_handle: ReservedHandle, + } + + impl FlushContextCmd { + pub fn new(flush_handle: ReservedHandle) -> Self { + Self { + header: CmdHeader::new::( + SessionTagEnum::NoSessions.into(), + CommandCodeEnum::FlushContext.into(), + ), + flush_handle, + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct FlushContextReply { + pub header: ReplyHeader, + } + + impl TpmCommand for FlushContextCmd { + type Reply = FlushContextReply; + } + + impl TpmReply for FlushContextReply { + type Command = FlushContextCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === EvictControl === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct EvictControlCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + object_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameter + persistent_handle: ReservedHandle, + } + + impl EvictControlCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + object_handle: ReservedHandle, + auth: common::CmdAuth, + persistent_handle: ReservedHandle, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::EvictControl.into()), + auth_handle, + object_handle, + auth_size: (size_of::() as u32).into(), + auth, + persistent_handle, + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct EvictControlReply { + pub header: ReplyHeader, + } + + impl TpmCommand for EvictControlCmd { + type Reply = EvictControlReply; + } + + impl TpmReply for EvictControlReply { + type Command = EvictControlCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: error-to-option (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === ReadPublic === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ReadPublicCmd { + header: CmdHeader, + object_handle: ReservedHandle, + } + + impl ReadPublicCmd { + pub fn new(session: SessionTag, object_handle: ReservedHandle) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::ReadPublic.into()), + object_handle, + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct ReadPublicReply { + pub header: ReplyHeader, + pub out_public: Tpm2bPublic, + name: Tpm2bBuffer, + 
qualified_name: Tpm2bBuffer, + } + + impl TpmCommand for ReadPublicCmd { + type Reply = ReadPublicReply; + } + + impl TpmReply for ReadPublicReply { + type Command = ReadPublicCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. + if header.size.get() as usize == end { + return Some(Self { + header, + out_public: Tpm2bPublic::new_zeroed(), + name: Tpm2bBuffer::new_zeroed(), + qualified_name: Tpm2bBuffer::new_zeroed(), + }); + } + + start = end; + let out_public = Tpm2bPublic::deserialize(&bytes[start..])?; + end += out_public.payload_size(); + + start = end; + let name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += name.payload_size(); + + start = end; + let qualified_name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += qualified_name.payload_size(); + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + out_public, + name, + qualified_name, + }) + } + + fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of::(); + payload_size += self.out_public.payload_size(); + payload_size += self.name.payload_size(); + payload_size += self.qualified_name.payload_size(); + + payload_size + } + } + + // === Nv DefineSpace === // + + #[repr(C)] + #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvDefineSpaceCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth_cmd: common::CmdAuth, + // Parameters + auth: Tpm2bBuffer, + public_info: Tpm2bNvPublic, + } + + impl NvDefineSpaceCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth_cmd: common::CmdAuth, + auth: u64, + public_info: TpmsNvPublic, + ) -> Result { + let auth = new_u64_be(auth); + 
let auth = + Tpm2bBuffer::new(auth.as_bytes()).map_err(TpmProtoError::NvDefineSpaceAuth)?; + let public_info = + Tpm2bNvPublic::new(public_info).map_err(TpmProtoError::NvDefineSpacePublicInfo)?; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_DefineSpace.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth_cmd, + auth, + public_info, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth_cmd.as_bytes()); + buffer.extend_from_slice(&self.auth.serialize()); + buffer.extend_from_slice(&self.public_info.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth_cmd); + payload_size += self.auth.payload_size(); + payload_size += self.public_info.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvDefineSpaceReply { + pub header: ReplyHeader, + } + + impl TpmCommand for NvDefineSpaceCmd { + type Reply = NvDefineSpaceReply; + } + + impl TpmReply for NvDefineSpaceReply { + type Command = NvDefineSpaceCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Nv UndefineSpace === // + + #[repr(C)] + #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvUndefineSpaceCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + nv_index: u32_be, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + } + + impl NvUndefineSpaceCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + nv_index: u32, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_UndefineSpace.into()), + auth_handle, + nv_index: nv_index.into(), + auth_size: (size_of::() as u32).into(), + auth, + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvUndefineSpaceReply { + pub header: ReplyHeader, + } + + impl TpmCommand for NvUndefineSpaceCmd { + type Reply = NvUndefineSpaceReply; + } + + impl TpmReply for NvUndefineSpaceReply { + type Command = NvUndefineSpaceCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? 
(https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Nv ReadPublic === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct NvReadPublicCmd { + header: CmdHeader, + nv_index: u32_be, + } + + impl NvReadPublicCmd { + pub fn new(session: SessionTag, nv_index: u32) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_ReadPublic.into()), + nv_index: nv_index.into(), + } + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvReadPublicReply { + pub header: ReplyHeader, + // Parameters + pub nv_public: Tpm2bNvPublic, + nv_name: Tpm2bBuffer, + } + + impl TpmCommand for NvReadPublicCmd { + type Reply = NvReadPublicReply; + } + + impl TpmReply for NvReadPublicReply { + type Command = NvReadPublicCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. 
+ if header.size.get() as usize == end { + return Some(Self { + header, + nv_public: Tpm2bNvPublic::new_zeroed(), + nv_name: Tpm2bBuffer::new_zeroed(), + }); + } + + start = end; + let nv_public = Tpm2bNvPublic::deserialize(&bytes[start..])?; + end += nv_public.payload_size(); + + start = end; + let nv_name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += nv_name.payload_size(); + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + nv_public, + nv_name, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += self.nv_public.payload_size(); + size += self.nv_name.payload_size(); + + size + } + } + + // === Nv Write === // + + #[repr(C)] + #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvWriteCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + pub nv_index: u32_be, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + auth_value: u64_be, + // Parameters + pub data: Tpm2bBuffer, + pub offset: u16_be, + } + + impl NvWriteCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + auth_value: u64, + nv_index: u32, + data: &[u8], + offset: u16, + ) -> Result { + let data = Tpm2bBuffer::new(data).map_err(TpmProtoError::NvWriteData)?; + // If `auth_handle` is not the owner, assuming password-based authorization is used. 
+ let auth_value_size = if auth_handle != TPM20_RH_OWNER { + size_of::() as u32 + } else { + 0 + }; + + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_Write.into()), + auth_handle, + nv_index: nv_index.into(), + auth_size: (size_of::() as u32 + auth_value_size).into(), + auth, + auth_value: auth_value.into(), + data, + offset: offset.into(), + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + Ok(cmd) + } + + pub fn update_write_data(&mut self, data: &[u8], offset: u16) -> Result<(), TpmProtoError> { + let data = Tpm2bBuffer::new(data).map_err(TpmProtoError::NvWriteData)?; + + self.data = data; + self.offset = offset.into(); + self.header.size = new_u32_be(self.payload_size() as u32); + + Ok(()) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.nv_index.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + if self.auth_handle != TPM20_RH_OWNER { + buffer.extend_from_slice(self.auth_value.as_bytes()); + } + buffer.extend_from_slice(&self.data.serialize()); + buffer.extend_from_slice(self.offset.as_bytes()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.nv_index); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + if self.auth_handle != TPM20_RH_OWNER { + payload_size += size_of_val(&self.auth_value); + } + payload_size += self.data.payload_size(); + payload_size += size_of_val(&self.offset); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvWriteReply { + pub header: ReplyHeader, + } + + impl TpmCommand for 
NvWriteCmd { + type Reply = NvWriteReply; + } + + impl TpmReply for NvWriteReply { + type Command = NvWriteCmd; + + fn deserialize(bytes: &[u8]) -> Option { + Some(Self::read_from_prefix(bytes).ok()?.0) // TODO: zerocopy: tpm better error? (https://github.com/microsoft/openvmm/issues/759) + } + + fn payload_size(&self) -> usize { + size_of::() + } + } + + // === Nv Read === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct NvReadCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + pub nv_index: u32_be, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + size: u16_be, + pub offset: u16_be, + } + + impl NvReadCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + nv_index: u32, + auth: common::CmdAuth, + size: u16, + offset: u16, + ) -> Self { + Self { + header: CmdHeader::new::(session, CommandCodeEnum::NV_Read.into()), + auth_handle, + nv_index: nv_index.into(), + auth_size: (size_of::() as u32).into(), + auth, + size: size.into(), + offset: offset.into(), + } + } + + pub fn update_read_parameters(&mut self, size: u16, offset: u16) { + self.size = size.into(); + self.offset = offset.into(); + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + if bytes.len() < end { + return None; + } + let header = CmdHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.command_code != CommandCodeEnum::NV_Read.into() { + return None; + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth_handle = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let nv_index = 
u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Skip authorization area + end += auth_size.get() as usize; + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let size = u16_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let offset = u16_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + Some(Self { + header, + auth_handle, + nv_index, + auth_size, + auth: common::CmdAuth::new(ReservedHandle(0.into()), 0, 0, 0), + size, + offset, + }) + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct NvReadReply { + pub header: ReplyHeader, + pub parameter_size: u32_be, + // Parameter + pub data: Tpm2bBuffer, + // Authorization area + pub auth: common::ReplyAuth, + } + + impl TpmCommand for NvReadCmd { + type Reply = NvReadReply; + } + + impl TpmReply for NvReadReply { + type Command = NvReadCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. 
+ if header.size.get() as usize == end { + return Some(Self { + header, + parameter_size: 0.into(), + data: Tpm2bBuffer::new_zeroed(), + auth: common::ReplyAuth::new_zeroed(), + }); + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parameter_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + let data = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += data.payload_size(); + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth = common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? + .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + parameter_size, + data, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += self.data.payload_size(); + + size + } + } + + // === Import === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct ImportCmd { + pub header: CmdHeader, + pub auth_handle: ReservedHandle, + // Authorization area + pub auth_size: u32_be, + pub auth: common::CmdAuth, + // Parameters + // `TPM2B_DATA` + pub encryption_key: Tpm2bBuffer, + // `TPM2B_PUBLIC` + pub object_public: Tpm2bPublic, + // `TPM2B_PRIVATE` + pub duplicate: Tpm2bBuffer, + // `TPM2B_ENCRYPTED_SECRET` + pub in_sym_seed: Tpm2bBuffer, + // `TPMT_SYM_DEF_OBJECT` + pub symmetric_alg: TpmtSymDefObject, + } + + impl ImportCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + encryption_key: &Tpm2bBuffer, + object_public: &Tpm2bPublic, + duplicate: &Tpm2bBuffer, + in_sym_seed: &Tpm2bBuffer, + symmetric_alg: &TpmtSymDefObject, + ) -> Self { + let mut cmd = Self { + header: 
CmdHeader::new::(session, CommandCodeEnum::Import.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + encryption_key: *encryption_key, + object_public: *object_public, + duplicate: *duplicate, + in_sym_seed: *in_sym_seed, + symmetric_alg: *symmetric_alg, + }; + + cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + cmd + } + + /// Deserialize the command payload assuming no inner wrapping key + pub fn deserialize_no_wrapping_key(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = 0; + + // When there is no inner wrapper for `duplicate`, `encryption_key` + // should be an empty buffer and `symmetric_alg` should be `TPM_ALG_NULL`. + // See Table 42, Section 13.3.2, "Trusted Platform Module Library Part 3: Commands", revision 1.38. + let encryption_key = Tpm2bBuffer::new_zeroed(); + let symmetric_alg = TpmtSymDefObject::new(AlgIdEnum::NULL.into(), None, None); + + let object_public = Tpm2bPublic::deserialize(&bytes[start..])?; + end += object_public.payload_size(); + + start = end; + let duplicate = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += duplicate.payload_size(); + + start = end; + let in_sym_seed = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += in_sym_seed.payload_size(); + + // Handle zero paddings applied to valid payload + if bytes.len() < end { + return None; + } + + Some(Self { + header: CmdHeader::new_zeroed(), + auth_handle: ReservedHandle(0.into()), + auth_size: 0.into(), + auth: common::CmdAuth::new_zeroed(), + encryption_key, + object_public, + duplicate, + in_sym_seed, + symmetric_alg, + }) + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + buffer.extend_from_slice(&self.encryption_key.serialize()); + buffer.extend_from_slice(&self.object_public.serialize()); 
+ buffer.extend_from_slice(&self.duplicate.serialize()); + buffer.extend_from_slice(&self.in_sym_seed.serialize()); + buffer.extend_from_slice(&self.symmetric_alg.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.encryption_key.payload_size(); + payload_size += self.object_public.payload_size(); + payload_size += self.duplicate.payload_size(); + payload_size += self.in_sym_seed.payload_size(); + payload_size += self.symmetric_alg.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct ImportReply { + pub header: ReplyHeader, + pub parameter_size: u32_be, + // Parameter + // `TPM2B_PRIVATE` + pub out_private: Tpm2bBuffer, + // Authorization area + pub auth: common::ReplyAuth, + } + + impl TpmCommand for ImportCmd { + type Reply = ImportReply; + } + + impl TpmReply for ImportReply { + type Command = ImportCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. 
+ if header.size.get() as usize == end { + return Some(Self { + header, + parameter_size: 0.into(), + out_private: Tpm2bBuffer::new_zeroed(), + auth: common::ReplyAuth::new_zeroed(), + }); + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parameter_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + let expected_auth_start = end + parameter_size.get() as usize; + + start = end; + let out_private = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += out_private.payload_size(); + + start = end; + if start != expected_auth_start { + return None; + } + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth = common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? + .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + parameter_size, + out_private, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += self.out_private.payload_size(); + + size + } + } + + // === Load === // + + #[repr(C)] + #[derive(IntoBytes, Immutable, KnownLayout, FromBytes)] + pub struct LoadCmd { + header: CmdHeader, + auth_handle: ReservedHandle, + // Authorization area + auth_size: u32_be, + auth: common::CmdAuth, + // Parameters + // `TPM2B_PRIVATE` + in_private: Tpm2bBuffer, + // `TPM2B_PUBLIC` + in_public: Tpm2bPublic, + } + + impl LoadCmd { + pub fn new( + session: SessionTag, + auth_handle: ReservedHandle, + auth: common::CmdAuth, + in_private: &Tpm2bBuffer, + in_public: &Tpm2bPublic, + ) -> Self { + let mut cmd = Self { + header: CmdHeader::new::(session, CommandCodeEnum::Load.into()), + auth_handle, + auth_size: (size_of::() as u32).into(), + auth, + in_private: *in_private, + in_public: *in_public, + }; + + 
cmd.header.size = new_u32_be(cmd.payload_size() as u32); + + cmd + } + + pub fn serialize(&self) -> Vec { + let mut buffer = Vec::new(); + + buffer.extend_from_slice(self.header.as_bytes()); + buffer.extend_from_slice(self.auth_handle.as_bytes()); + buffer.extend_from_slice(self.auth_size.as_bytes()); + buffer.extend_from_slice(self.auth.as_bytes()); + buffer.extend_from_slice(&self.in_private.serialize()); + buffer.extend_from_slice(&self.in_public.serialize()); + + buffer + } + + pub fn payload_size(&self) -> usize { + let mut payload_size = 0; + + payload_size += size_of_val(&self.header); + payload_size += size_of_val(&self.auth_handle); + payload_size += size_of_val(&self.auth_size); + payload_size += size_of_val(&self.auth); + payload_size += self.in_private.payload_size(); + payload_size += self.in_public.payload_size(); + + payload_size + } + } + + #[repr(C)] + #[derive(Debug, FromBytes, IntoBytes, Immutable, KnownLayout)] + pub struct LoadReply { + pub header: ReplyHeader, + pub object_handle: ReservedHandle, + pub parameter_size: u32_be, + // Parameter + // `TPM2B_NAME` + pub name: Tpm2bBuffer, + // Authorization area + pub auth: common::ReplyAuth, + } + + impl TpmCommand for LoadCmd { + type Reply = LoadReply; + } + + impl TpmReply for LoadReply { + type Command = LoadCmd; + + fn deserialize(bytes: &[u8]) -> Option { + let mut start = 0; + let mut end = size_of::(); + + let header = ReplyHeader::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + // Handle the command failure. 
+ if header.size.get() as usize == end { + return Some(Self { + header, + object_handle: ReservedHandle::new_zeroed(), + parameter_size: 0.into(), + name: Tpm2bBuffer::new_zeroed(), + auth: common::ReplyAuth::new_zeroed(), + }); + } + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let object_handle = ReservedHandle::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + start = end; + end += size_of::(); + if bytes.len() < end { + return None; + } + let parameter_size = u32_be::read_from_prefix(&bytes[start..end]).ok()?.0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + let expected_auth_start = end + parameter_size.get() as usize; + + start = end; + let name = Tpm2bBuffer::deserialize(&bytes[start..])?; + end += name.payload_size(); + + start = end; + if start != expected_auth_start { + return None; + } + end += size_of::(); + if bytes.len() < end { + return None; + } + let auth = common::ReplyAuth::read_from_prefix(&bytes[start..end]) + .ok()? 
+ .0; // TODO: zerocopy: use-rest-of-range, option-to-error (https://github.com/microsoft/openvmm/issues/759) + + if header.size.get() as usize != end { + return None; + } + + Some(Self { + header, + object_handle, + parameter_size, + name, + auth, + }) + } + + fn payload_size(&self) -> usize { + let mut size = 0; + + size += size_of::(); + size += size_of::(); + size += self.name.payload_size(); + + size + } + } +} + +#[cfg(test)] +mod tests { + use super::protocol::common::*; + use super::protocol::*; + use super::*; + + #[test] + fn test_create_primary() { + const AK_PUB_EXPECTED_CMD: [u8; 321] = [ + 0x80, 0x02, 0x00, 0x00, 0x01, 0x41, 0x00, 0x00, 0x01, 0x31, 0x40, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x18, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x05, 0x04, + 0x72, 0x00, 0x00, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + const AK_PUB_REPLY_SUCCEED: [u8; 488] = [ + 0x80, 0x02, 0x00, 0x00, 0x01, 0xe8, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0xd1, 0x01, 0x18, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x05, 0x04, 0x72, + 0x00, 0x00, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0xc8, 0x38, 0xd1, 0x52, 0x00, 0x00, 0xe9, 0x3c, 0x89, 0x4c, 0x52, 0xfb, + 0x79, 0x7b, 0xc4, 0x14, 0x28, 0x5f, 0xaa, 0x50, 0x78, 0x9a, 0x31, 0x2b, 0x4d, 0xfe, + 0xad, 0xad, 0x97, 0x28, 0x49, 0xb2, 0x39, 0x77, 0x5e, 0x06, 0x49, 0xb7, 0x93, 0xf5, + 0x2f, 0x84, 0x85, 0x2e, 0x17, 0x87, 0x52, 0x96, 0x36, 0x74, 0x76, 0x21, 0x5f, 0xc2, + 0x90, 0x81, 0xf7, 0xe9, 0xd8, 0xac, 0x07, 0x60, 0xaf, 0x83, 0xa2, 0x08, 0xda, 0x94, + 0x77, 0x2c, 0x73, 0x9c, 0xd4, 0x80, 0x47, 0x43, 0xa6, 0x4e, 0x36, 0xc3, 0x7e, 0xe2, + 0x9c, 0xfb, 0xf1, 0x7e, 0x36, 0x8e, 0x7a, 0x86, 0xde, 0x3d, 0x4e, 0x8a, 0x3a, 0xce, + 0x7a, 0xa1, 0x58, 0xf6, 0xdb, 0x49, 0x3e, 0xc2, 0x2e, 0xcb, 0x4a, 0xbc, 0x19, 0x81, + 0xd5, 0x5d, 0x4f, 0x57, 0x39, 0xf5, 0x9e, 0x02, 0x56, 0x91, 0x37, 0xc2, 0x87, 0x96, + 0x26, 0xd8, 0x4a, 0x45, 0x16, 0x01, 0xe0, 0x2e, 0x20, 0x95, 0x75, 0xb8, 0x20, 0x6d, + 0x83, 0x54, 0x65, 0x3d, 0x66, 0xf4, 0x8a, 0x43, 0x84, 0x9f, 0xa6, 0xc5, 0x2c, 0x08, + 0xe7, 0x59, 0x8e, 0x1f, 0x6d, 0xea, 0x32, 0x5b, 0x36, 0x8e, 0xd1, 0xf3, 0x09, 0x60, + 0x86, 0xdb, 0x55, 0xc9, 0xf0, 0xf9, 0x79, 0x87, 0x71, 0x1c, 0x7c, 0x98, 
0xa4, 0xc8, + 0x91, 0x77, 0xa7, 0x95, 0x82, 0x19, 0xcc, 0x9d, 0xde, 0x4d, 0x7b, 0xf7, 0xc1, 0x31, + 0x5b, 0xae, 0x45, 0x6e, 0x6b, 0xf1, 0xaf, 0x89, 0x07, 0x91, 0x80, 0x9d, 0xe5, 0x49, + 0xfc, 0x5e, 0xb2, 0x15, 0x67, 0xcf, 0x05, 0xbb, 0xb3, 0x98, 0x54, 0x34, 0x45, 0x2c, + 0xc3, 0x3d, 0x09, 0x8e, 0x8d, 0x60, 0xba, 0x67, 0xd9, 0xbe, 0x1c, 0x2a, 0x2c, 0x2a, + 0xfa, 0xed, 0x26, 0x81, 0x96, 0x48, 0x17, 0xb3, 0xa6, 0x90, 0x9a, 0x78, 0xa5, 0xac, + 0x80, 0xb2, 0xbe, 0xff, 0x3d, 0x35, 0x00, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, + 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, + 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, + 0x78, 0x52, 0xb8, 0x55, 0x01, 0x00, 0x10, 0x00, 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, + 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, 0x20, 0x28, 0xd0, 0x26, 0xfa, 0xfd, + 0x74, 0x91, 0x06, 0x74, 0x3e, 0x27, 0xc4, 0x28, 0x05, 0x51, 0x58, 0x5e, 0x5d, 0x17, + 0x66, 0x8e, 0xb5, 0x21, 0x83, 0x5e, 0xd6, 0x01, 0x27, 0xef, 0xfc, 0x05, 0xd4, 0x80, + 0x21, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x30, 0xfb, 0xfe, 0xd4, 0xe7, 0x9f, 0xc5, 0x2f, + 0xfd, 0x7c, 0xe0, 0x4a, 0x97, 0xb5, 0xec, 0x61, 0x59, 0x4d, 0x43, 0x19, 0x29, 0xc0, + 0x4f, 0xef, 0xda, 0xdc, 0xe1, 0x48, 0x4d, 0xbd, 0x3d, 0x47, 0x0e, 0xe3, 0x2f, 0xd4, + 0xf9, 0x57, 0x4f, 0x77, 0x0f, 0x58, 0x5c, 0x73, 0x58, 0xc2, 0x2d, 0xd7, 0x4a, 0x00, + 0x22, 0x00, 0x0b, 0x92, 0x57, 0x64, 0x38, 0x21, 0xf9, 0x68, 0xe9, 0xfc, 0x47, 0xfa, + 0xbf, 0x9c, 0x56, 0x49, 0x7a, 0x63, 0xc2, 0xc0, 0x8a, 0x12, 0x80, 0x49, 0x73, 0xc3, + 0x8b, 0x00, 0x06, 0x99, 0xe9, 0xfc, 0x22, 0x00, 0x00, 0x01, 0x00, 0x00, + ]; + + const EK_PUB_EXPECTED_CMD: [u8; 355] = [ + 0x80, 0x02, 0x00, 0x00, 0x01, 0x63, 0x00, 0x00, 0x01, 0x31, 0x40, 0x00, 0x00, 0x0b, + 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3a, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x03, 0x00, + 0xb2, 0x00, 0x20, 0x83, 0x71, 0x97, 0x67, 0x44, 0x84, 
0xb3, 0xf8, 0x1a, 0x90, 0xcc, + 0x8d, 0x46, 0xa5, 0xd7, 0x24, 0xfd, 0x52, 0xd7, 0x6e, 0x06, 0x52, 0x0b, 0x64, 0xf2, + 0xa1, 0xda, 0x1b, 0x33, 0x14, 0x69, 0xaa, 0x00, 0x06, 0x00, 0x80, 0x00, 0x43, 0x00, + 0x10, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + const EK_PUB_REPLY_SUCCEED: [u8; 522] = [ + 0x80, 0x02, 0x00, 0x00, 0x02, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 
0x00, + 0x00, 0x00, 0x01, 0xf3, 0x01, 0x3a, 0x00, 0x01, 0x00, 0x0b, 0x00, 0x03, 0x00, 0xb2, + 0x00, 0x20, 0x83, 0x71, 0x97, 0x67, 0x44, 0x84, 0xb3, 0xf8, 0x1a, 0x90, 0xcc, 0x8d, + 0x46, 0xa5, 0xd7, 0x24, 0xfd, 0x52, 0xd7, 0x6e, 0x06, 0x52, 0x0b, 0x64, 0xf2, 0xa1, + 0xda, 0x1b, 0x33, 0x14, 0x69, 0xaa, 0x00, 0x06, 0x00, 0x80, 0x00, 0x43, 0x00, 0x10, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x9e, 0x9c, 0x1b, 0x40, 0x00, 0x00, + 0xea, 0x2f, 0xd5, 0xd7, 0xde, 0x9b, 0x18, 0x83, 0x55, 0x00, 0x09, 0x53, 0x13, 0xa8, + 0x88, 0x10, 0x24, 0x46, 0x44, 0xa8, 0x2d, 0x62, 0xd3, 0x24, 0xe5, 0xf9, 0xcd, 0xca, + 0x61, 0xb7, 0xd8, 0x15, 0x98, 0xf8, 0x56, 0x64, 0x14, 0x7b, 0x40, 0x5a, 0x47, 0xbd, + 0xd1, 0xc8, 0x7d, 0x1f, 0x93, 0x72, 0x3f, 0x03, 0xe0, 0x29, 0x38, 0x08, 0x03, 0xae, + 0x62, 0x13, 0x10, 0xf5, 0x88, 0x5f, 0x86, 0x84, 0x82, 0xfb, 0xda, 0xd8, 0x78, 0xfd, + 0x02, 0x9e, 0x88, 0x5c, 0xaf, 0x30, 0xd4, 0x3d, 0x41, 0xb2, 0xb7, 0x7a, 0x36, 0xa5, + 0x95, 0x37, 0x08, 0x44, 0x20, 0x10, 0xb3, 0x6c, 0xd0, 0x6d, 0xe9, 0xab, 0xce, 0x35, + 0xc0, 0x82, 0x52, 0x06, 0x41, 0x4c, 0xc5, 0x48, 0x5b, 0xe6, 0x22, 0x00, 0x7e, 0x1d, + 0x4b, 0x68, 0x80, 0x34, 0xe9, 0xea, 0x6e, 0xf9, 0xf7, 0xf7, 0x84, 0xbe, 0x56, 0xdf, + 0xea, 0x85, 0x97, 0x1b, 0x03, 0x5c, 0x5c, 0x9f, 0xf4, 0x72, 0xef, 0xe7, 0xfe, 0x5e, + 0x73, 0x2f, 0xf1, 0xdd, 0x40, 0x80, 0x16, 0x8d, 0x1b, 0x95, 0xee, 0xec, 0x21, 0x1c, + 0x30, 0x84, 0x25, 0x08, 0x8d, 0x0e, 0xda, 0x5b, 0x00, 0x9c, 0x49, 0x8b, 0xc8, 0xb3, + 0x48, 0x9a, 0xc9, 0x19, 0x0f, 0x68, 0xc7, 0x0a, 0x7a, 0x65, 0x35, 0xa0, 0x09, 0x23, + 0x88, 0x3f, 0x97, 0x53, 0x4e, 0xbc, 0x08, 0xc0, 0x5b, 0x69, 0x94, 0xcc, 0xd9, 0xb9, + 0xea, 0x8c, 0x20, 0x9e, 0x1a, 0xf9, 0x57, 0x08, 0x1a, 0xe0, 0x2d, 0x88, 0x56, 0x1f, + 0x9f, 0x50, 0x2e, 0x12, 0xf2, 0x69, 0x9a, 0xdf, 0x30, 0x56, 0xc1, 0xf0, 0x31, 0xef, + 0x64, 0xd5, 0x34, 0x02, 0x15, 0xf4, 0xd7, 0x7b, 0x76, 0xd9, 0x99, 0x24, 0x83, 0x99, + 0xa5, 0x05, 0xc1, 0xcd, 0xa6, 0xbd, 0xc3, 0x3d, 0x7c, 0x1e, 0x94, 0xdd, 0x00, 0x37, + 0x00, 0x00, 
0x00, 0x00, 0x00, 0x20, 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, + 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, + 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55, 0x01, 0x00, 0x10, 0x00, + 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x04, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x00, 0x00, + 0x20, 0x28, 0xd0, 0x26, 0xfa, 0xfd, 0x74, 0x91, 0x06, 0x74, 0x3e, 0x27, 0xc4, 0x28, + 0x05, 0x51, 0x58, 0x5e, 0x5d, 0x17, 0x66, 0x8e, 0xb5, 0x21, 0x83, 0x5e, 0xd6, 0x01, + 0x27, 0xef, 0xfc, 0x05, 0xd4, 0x80, 0x21, 0x40, 0x00, 0x00, 0x0b, 0x00, 0x30, 0xe2, + 0xf2, 0x64, 0xc3, 0xd7, 0x9e, 0xc1, 0x07, 0xbb, 0x49, 0x74, 0x67, 0xd3, 0xc7, 0xf6, + 0xb7, 0x8c, 0xe3, 0x2e, 0x28, 0x36, 0xa6, 0x1f, 0x6f, 0x0b, 0xbd, 0xe3, 0x8e, 0x77, + 0xa1, 0x8c, 0x50, 0xe4, 0xaa, 0xa4, 0x01, 0x61, 0xb4, 0x7a, 0x4a, 0x3b, 0x5d, 0xac, + 0xe1, 0xd1, 0x65, 0x69, 0x1e, 0x00, 0x22, 0x00, 0x0b, 0xe5, 0x6f, 0x0f, 0xae, 0x8d, + 0x0f, 0x91, 0xb9, 0x84, 0x17, 0xc3, 0x86, 0x13, 0xa6, 0x12, 0xbe, 0xec, 0x85, 0xf9, + 0x0b, 0xd3, 0xfe, 0x4f, 0x3d, 0x79, 0x7d, 0x6d, 0x3c, 0xc5, 0xcc, 0xb1, 0x5b, 0x00, + 0x00, 0x01, 0x00, 0x00, + ]; + + const REPLY_FAIL: [u8; 10] = [0x80, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x02, 0xda]; + + // Create AK pub + let symmetric = TpmtSymDefObject::new(AlgIdEnum::NULL.into(), None, None); + let scheme = TpmtRsaScheme::new(AlgIdEnum::RSASSA.into(), Some(AlgIdEnum::SHA256.into())); + let rsa_params = TpmsRsaParams::new(symmetric, scheme, 2048, 0); + + let object_attributes = TpmaObjectBits::new() + .with_fixed_tpm(true) + .with_fixed_parent(true) + .with_sensitive_data_origin(true) + .with_user_with_auth(true) + .with_no_da(true) + .with_restricted(true) + .with_sign_encrypt(true); + + let result = TpmtPublic::new( + AlgIdEnum::RSA.into(), + AlgIdEnum::SHA256.into(), + object_attributes, + &[], + rsa_params, + &[0u8; 256], + ); + assert!(result.is_ok()); + let in_public = result.unwrap(); + + let result = CreatePrimaryCmd::new( + 
SessionTagEnum::Sessions.into(), + TPM20_RH_ENDORSEMENT, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + &[], + &[], + in_public, + &[], + &[], + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + + assert_eq!(bytes, AK_PUB_EXPECTED_CMD); + + let mut reply = [0u8; 4096]; + reply[..AK_PUB_REPLY_SUCCEED.len()].copy_from_slice(&AK_PUB_REPLY_SUCCEED); + + let response = CreatePrimaryReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + assert_eq!(response.object_handle.0.get(), 0x80000000); + + reply[..REPLY_FAIL.len()].copy_from_slice(&REPLY_FAIL); + + let response = CreatePrimaryReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x2da); + + // Create EK pub + const AUTH_POLICY_A_SHA_256: [u8; 32] = [ + 0x83, 0x71, 0x97, 0x67, 0x44, 0x84, 0xB3, 0xF8, 0x1A, 0x90, 0xCC, 0x8D, 0x46, 0xA5, + 0xD7, 0x24, 0xFD, 0x52, 0xD7, 0x6E, 0x06, 0x52, 0x0B, 0x64, 0xF2, 0xA1, 0xDA, 0x1B, + 0x33, 0x14, 0x69, 0xAA, + ]; + let symmetric = TpmtSymDefObject::new( + AlgIdEnum::AES.into(), + Some(128), + Some(AlgIdEnum::CFB.into()), + ); + let scheme = TpmtRsaScheme::new(AlgIdEnum::NULL.into(), None); + let rsa_params = TpmsRsaParams::new(symmetric, scheme, 2048, 0); + + let object_attributes = TpmaObjectBits::new() + .with_fixed_tpm(true) + .with_fixed_parent(true) + .with_sensitive_data_origin(true) + .with_admin_with_policy(true) + .with_restricted(true) + .with_decrypt(true); + + let result = TpmtPublic::new( + AlgIdEnum::RSA.into(), + AlgIdEnum::SHA256.into(), + object_attributes, + &AUTH_POLICY_A_SHA_256, + rsa_params, + &[0u8; 256], + ); + assert!(result.is_ok()); + let in_public = result.unwrap(); + + let result = CreatePrimaryCmd::new( + SessionTagEnum::Sessions.into(), + TPM20_RH_ENDORSEMENT, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + &[], + &[], + in_public, + 
&[], + &[], + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + + assert_eq!(bytes, EK_PUB_EXPECTED_CMD); + + reply[..EK_PUB_REPLY_SUCCEED.len()].copy_from_slice(&EK_PUB_REPLY_SUCCEED); + + let response = CreatePrimaryReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + assert_eq!(response.object_handle.0.get(), 0x80000000); + } + + #[test] + fn test_read_public() { + const REPLY_SUCCEED: [u8; 364] = [ + 0x80, 0x01, 0x00, 0x00, 0x01, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x01, 0x18, 0x00, 0x01, + 0x00, 0x0b, 0x00, 0x05, 0x04, 0x72, 0x00, 0x00, 0x00, 0x10, 0x00, 0x14, 0x00, 0x0b, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0xa6, 0xaf, 0x71, 0xec, 0x00, 0x00, + 0xe0, 0x69, 0xa5, 0xc5, 0xcd, 0x94, 0x59, 0x3b, 0x79, 0xe6, 0xee, 0x14, 0xd3, 0x50, + 0xfb, 0x0b, 0xa9, 0x03, 0x51, 0xbf, 0x23, 0xc5, 0x15, 0xdc, 0xbc, 0x4a, 0x3b, 0xaa, + 0xef, 0x12, 0x3c, 0x24, 0x47, 0xf2, 0x81, 0xf6, 0x85, 0xf4, 0x8c, 0x16, 0x14, 0x10, + 0x3c, 0x3b, 0x2e, 0x7b, 0x04, 0x5e, 0x25, 0x66, 0xcd, 0x8d, 0x86, 0x0b, 0x8c, 0x2b, + 0x5f, 0xca, 0x36, 0x1d, 0x5f, 0xff, 0xbf, 0x70, 0x63, 0x79, 0x5b, 0x7f, 0x93, 0x94, + 0x6d, 0xbd, 0x6e, 0x4f, 0x22, 0x94, 0x93, 0x87, 0xe1, 0x63, 0x4d, 0xa4, 0x9a, 0x2f, + 0xad, 0x90, 0x4c, 0xc9, 0x37, 0x14, 0x59, 0xd3, 0x03, 0x6d, 0x37, 0x98, 0xd4, 0x85, + 0x19, 0x9b, 0x93, 0x7e, 0x61, 0x93, 0x6d, 0x1c, 0xe0, 0xe6, 0x72, 0x71, 0x81, 0x45, + 0xe0, 0xea, 0x5f, 0xb4, 0x6a, 0x9a, 0x3e, 0x86, 0x60, 0x86, 0xaf, 0xfc, 0x86, 0x0f, + 0x0d, 0xe8, 0x81, 0x46, 0x59, 0xad, 0xeb, 0x6f, 0xef, 0x38, 0x5e, 0x53, 0xea, 0x91, + 0xcb, 0xa9, 0xf8, 0x31, 0xcd, 0x52, 0x85, 0x55, 0xa8, 0x91, 0x68, 0xd8, 0xdd, 0x20, + 0x67, 0x21, 0x30, 0x03, 0xcd, 0x48, 0x3b, 0xb0, 0x33, 0x16, 0xb4, 0xf0, 0x06, 0x55, + 0xdf, 0x15, 0xd2, 0x65, 0x55, 0x2f, 0xec, 0xec, 0xc5, 0x74, 0xea, 0xd8, 0x0f, 0x29, + 0xac, 0x24, 0x38, 0x32, 0x34, 0x1f, 0xb3, 0x20, 0x28, 0xf6, 0x55, 0xfb, 
0x51, 0xf1, + 0x22, 0xa3, 0x5e, 0x38, 0xc6, 0xa5, 0xa4, 0xe0, 0xc2, 0xa3, 0x50, 0x27, 0xf6, 0x1d, + 0x55, 0x8e, 0x95, 0xe9, 0x95, 0x26, 0x8e, 0x70, 0x35, 0x7b, 0x73, 0xbb, 0x8e, 0xf2, + 0xdc, 0x37, 0x30, 0x99, 0x20, 0x2e, 0x1f, 0x09, 0xbd, 0x85, 0x24, 0x44, 0x05, 0x8f, + 0x11, 0xc4, 0xb5, 0x71, 0xc1, 0x2e, 0x52, 0xf6, 0x2e, 0x6f, 0x9a, 0x11, 0x00, 0x22, + 0x00, 0x0b, 0x61, 0xca, 0x8b, 0xec, 0x0f, 0x9e, 0xc1, 0x38, 0x35, 0xd3, 0x43, 0x58, + 0x77, 0xdf, 0x53, 0x82, 0xe7, 0xb2, 0xff, 0x7b, 0xe4, 0x6c, 0xfb, 0x34, 0xa4, 0x28, + 0xdd, 0xda, 0xcb, 0xe9, 0x50, 0x50, 0x00, 0x22, 0x00, 0x0b, 0x51, 0xfa, 0x43, 0xbd, + 0x35, 0x01, 0xd6, 0x66, 0xa0, 0x4d, 0xc8, 0x03, 0x4f, 0xa1, 0x64, 0xa0, 0x91, 0x63, + 0x3c, 0x27, 0xd5, 0x90, 0xa3, 0x7a, 0xae, 0xbc, 0x52, 0xcc, 0x4e, 0x9a, 0xa3, 0x66, + ]; + + const REPLY_FAIL: [u8; 10] = [0x80, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01, 0x8b]; + + let mut reply = [0u8; 4096]; + reply[..REPLY_SUCCEED.len()].copy_from_slice(&REPLY_SUCCEED); + + let response: Option = ReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + + reply[..REPLY_FAIL.len()].copy_from_slice(&REPLY_FAIL); + + let response = ReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x18b); + } + + #[test] + fn test_nv_read_public() { + const REPLY_SUCCEED: [u8; 62] = [ + 0x80, 0x01, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x01, 0x40, + 0x00, 0x01, 0x00, 0x0b, 0x42, 0x06, 0x00, 0x04, 0x00, 0x00, 0x10, 0x00, 0x00, 0x22, + 0x00, 0x0b, 0xc1, 0x0f, 0x8d, 0x61, 0x77, 0xea, 0xd0, 0x29, 0x52, 0xa6, 0x2d, 0x3a, + 0x39, 0xc7, 0x22, 0x0b, 0xb9, 0xa1, 0xe1, 0xfe, 0x08, 0x68, 0xa8, 0x6f, 0x5f, 0x10, + 0xd6, 0x86, 0x83, 0x28, 0x79, 0x3e, + ]; + + const REPLY_FAIL: [u8; 10] = [0x80, 0x01, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01, 0x8b]; + + let mut reply = 
[0u8; 4096]; + reply[..REPLY_SUCCEED.len()].copy_from_slice(&REPLY_SUCCEED); + + let response = NvReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + + reply[..REPLY_FAIL.len()].copy_from_slice(&REPLY_FAIL); + + let response = NvReadPublicReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x18b); + } + + #[test] + fn test_define_space() { + const EXPECTED_CMD: [u8; 53] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0x35, 0x00, 0x00, 0x01, 0x2a, 0x40, 0x00, 0x00, 0x0c, + 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x08, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00, 0x00, 0x0e, 0x01, 0xc1, 0x01, + 0xd0, 0x00, 0x0b, 0x42, 0x06, 0x00, 0x04, 0x00, 0x00, 0x10, 0x00, + ]; + + let auth_value: u64 = 0x7766554433221100; + + let attributes = TpmaNvBits::new() + .with_nv_authread(true) + .with_nv_authwrite(true) + .with_nv_ownerread(true) + .with_nv_platformcreate(true) + .with_nv_no_da(true); + + let result = TpmsNvPublic::new(0x1c101d0, AlgIdEnum::SHA256.into(), attributes, &[], 4096); + assert!(result.is_ok()); + let nv_public = result.unwrap(); + + let result = NvDefineSpaceCmd::new( + SessionTagEnum::Sessions.into(), + TPM20_RH_PLATFORM, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + auth_value, + nv_public, + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + assert_eq!(bytes, EXPECTED_CMD); + } + + #[test] + fn test_nv_write_authwrite() { + const EXPECTED_CMD: [u8; 171] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0xab, 0x00, 0x00, 0x01, 0x37, 0x01, 0xc1, 0x01, 0xd0, + 0x01, 0xc1, 0x01, 0xd0, 0x00, 0x00, 0x00, 0x11, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x08, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00, 0x00, 0x80, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, + ]; + let auth_value: u64 = 0x7766554433221100; + + let result = NvWriteCmd::new( + SessionTagEnum::Sessions.into(), + ReservedHandle(0x1c101d0.into()), + CmdAuth::new(TPM20_RS_PW, 0, 0, size_of_val(&auth_value) as u16), + auth_value, + 0x1c101d0, + &[1u8; 128], + 0, + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + assert_eq!(bytes, EXPECTED_CMD); + } + + #[test] + fn test_nv_write_ownerwrite() { + const EXPECTED_CMD: [u8; 163] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x01, 0x37, 0x40, 0x00, 0x00, 0x01, + 0x01, 0xc1, 0x01, 0xd0, 0x00, 0x00, 0x00, 0x09, 0x40, 0x00, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x80, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, + ]; + + let result = NvWriteCmd::new( + SessionTagEnum::Sessions.into(), + TPM20_RH_OWNER, + CmdAuth::new(TPM20_RS_PW, 0, 0, 0), + 0, + 0x1c101d0, + &[1u8; 128], + 0, + ); + assert!(result.is_ok()); + let cmd = result.unwrap(); + + let bytes = cmd.serialize(); + assert_eq!(bytes, EXPECTED_CMD); + } + + #[test] + fn test_nv_read() { + const REPLY_SUCCEED: [u8; 85] = [ + 0x80, 0x02, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, + 0x00, 0x40, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, + 0xdd, 0xee, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, + ]; + + const EXPECTED_DATA: [u8; 64] = [ + 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, + 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + let mut reply = [0u8; 4096]; + reply[..REPLY_SUCCEED.len()].copy_from_slice(&REPLY_SUCCEED); + + let response = NvReadReply::deserialize(&reply); + assert!(response.is_some()); + let response = response.unwrap(); + assert_eq!(response.header.response_code.get(), 0x0); + assert_eq!(response.data.buffer[..EXPECTED_DATA.len()], EXPECTED_DATA); + } +} diff --git a/opentmk/opentmk/src/hypercall.rs b/opentmk/opentmk/src/hypercall.rs new file mode 
100644 index 0000000000..c5ff0f471f --- /dev/null +++ b/opentmk/opentmk/src/hypercall.rs @@ -0,0 +1,921 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Hypercall infrastructure. + +#![allow(dead_code)] +use core::arch::asm; +use core::mem::size_of; +use core::sync::atomic::AtomicU16; +use core::sync::atomic::Ordering; + +use arrayvec::ArrayVec; +use hvdef::hypercall::EnablePartitionVtlFlags; +use hvdef::hypercall::HvInputVtl; +use hvdef::hypercall::InitialVpContextX64; +use hvdef::HvRegisterValue; +use hvdef::HvRegisterVsmPartitionConfig; +use hvdef::HvX64RegisterName; +use hvdef::HvX64SegmentRegister; +use hvdef::Vtl; +use hvdef::HV_PAGE_SIZE; +use memory_range::MemoryRange; +use minimal_rt::arch::hypercall::invoke_hypercall; +use minimal_rt::arch::hypercall::HYPERCALL_PAGE; +use zerocopy::FromBytes; +use zerocopy::IntoBytes; + +/// Page-aligned, page-sized buffer for use with hypercalls +#[repr(C, align(4096))] +struct HvcallPage { + buffer: [u8; HV_PAGE_SIZE as usize], +} + +#[inline(never)] +pub fn invoke_hypercall_vtl(control: hvdef::hypercall::Control) { + // SAFETY: the caller guarantees the safety of this operation. + unsafe { + core::arch::asm! { + "call {hypercall_page}", + hypercall_page = sym HYPERCALL_PAGE, + inout("rcx") u64::from(control) => _, + in("rdx") 0, + in("rax") 0, + } + } +} + +impl HvcallPage { + pub const fn new() -> Self { + HvcallPage { + buffer: [0; HV_PAGE_SIZE as usize], + } + } + + /// Address of the hypercall page. + fn address(&self) -> u64 { + let addr = self.buffer.as_ptr() as u64; + + // These should be page-aligned + assert!(addr % HV_PAGE_SIZE == 0); + + addr + } +} + +/// Provides mechanisms to invoke hypercalls within the boot shim. +/// +/// This module defines the `HvCall` struct and associated methods to interact with +/// hypervisor functionalities through hypercalls. 
It includes utilities for managing +/// hypercall pages, setting and getting virtual processor (VP) registers, enabling +/// VTL (Virtual Trust Levels), and applying memory protections. +/// +/// # Overview +/// +/// - **Hypercall Pages**: Manages page-aligned buffers for hypercall input and output. +/// - **VP Registers**: Provides methods to set and get VP registers. +/// - **VTL Management**: Includes methods to enable VTLs, apply VTL protections, and +/// manage VTL-specific operations. +/// - **Memory Protections**: Supports applying VTL protections and accepting VTL2 pages. +/// +/// # Safety +/// +/// Many methods in this module involve unsafe operations, such as invoking hypercalls +/// or interacting with low-level memory structures. The caller must ensure the safety +/// of these operations by adhering to the requirements of the hypervisor and the +/// underlying architecture. +/// +/// # Usage +/// +/// This module is designed for use in single-threaded environments, such as the boot +/// shim. It uses static buffers for hypercall pages, so it is not thread-safe. +/// +/// # Features +/// +/// - **Architecture-Specific Implementations**: Some methods are only available for +/// specific architectures (e.g., `x86_64` or `aarch64`). +/// - **Error Handling**: Methods return `Result` types to handle hypervisor errors. +/// +/// # Examples +/// +/// ```rust +/// let mut hv_call = HvCall::new(); +/// hv_call.initialize(); +/// let vtl = hv_call.vtl(); +/// println!("Current VTL: {:?}", vtl); +/// hv_call.uninitialize(); +/// ``` +/// +/// # Modules and Types +/// +/// - `HvCall`: Main struct for managing hypercalls. +/// - `HvcallPage`: Struct for page-aligned buffers. +/// - `HwId`: Type alias for hardware IDs (APIC ID on `x86_64`, MPIDR on `aarch64`). +/// +/// # Notes +/// +/// - This module assumes the presence of a hypervisor that supports the required +/// hypercalls. +/// - The boot shim must ensure that hypercalls are invoked in a valid context. 
+/// Internally uses static buffers for the hypercall page, the input +/// page, and the output page, so this should not be used in any +/// multi-threaded capacity (which the boot shim currently is not). +pub struct HvCall { + input_page: HvcallPage, + output_page: HvcallPage, +} + +static HV_PAGE_INIT_STATUS: AtomicU16 = AtomicU16::new(0); + +#[expect(unsafe_code)] +impl HvCall { + /// Hypercall to accept vtl2 pages from address start to end with VTL 2 + /// protections and no host visibility + #[cfg_attr(target_arch = "aarch64", allow(dead_code))] + pub fn accept_vtl2_pages( + &mut self, + range: MemoryRange, + memory_type: hvdef::hypercall::AcceptMemoryType, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); + + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let header = hvdef::hypercall::AcceptGpaPages { + partition_id: hvdef::HV_PARTITION_ID_SELF, + page_attributes: hvdef::hypercall::AcceptPagesAttributes::new() + .with_memory_type(memory_type.0) + .with_host_visibility(hvdef::hypercall::HostVisibilityType::PRIVATE) // no host visibility + .with_vtl_set(1 << 2), // applies vtl permissions for vtl 2 + vtl_permission_set: hvdef::hypercall::VtlPermissionSet { + vtl_permission_from_1: [0; hvdef::hypercall::HV_VTL_PERMISSION_SET_SIZE], + }, + gpa_page_base: current_page, + }; + + let remaining_pages = range.end_4k_gpn() - current_page; + let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallAcceptGpaPages, + Some(count as usize), + ); + + output.result()?; + + current_page += count; + } + + Ok(()) + } + + /// Hypercall to apply vtl protections to the pages from address start to end + #[cfg_attr(target_arch = "aarch64", allow(dead_code))] + pub fn 
apply_vtl2_protections(&mut self, range: MemoryRange) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); + + let header = hvdef::hypercall::ModifyVtlProtectionMask { + partition_id: hvdef::HV_PARTITION_ID_SELF, + map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE, + target_vtl: HvInputVtl::CURRENT_VTL, + reserved: [0; 3], + }; + + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let remaining_pages = range.end_4k_gpn() - current_page; + let count = remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + for i in 0..count { + let page_num = current_page + i; + let _ = page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + input_offset += size_of::(); + } + + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallModifyVtlProtectionMask, + Some(count as usize), + ); + + output.result()?; + + current_page += count; + } + + Ok(()) + } + + /// Hypercall to apply vtl protections to the pages from address start to end + #[cfg_attr(target_arch = "x86_64", allow(dead_code))] + pub fn apply_vtl_protections( + &mut self, + range: MemoryRange, + vtl: Vtl, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + const MAX_INPUT_ELEMENTS: usize = (HV_PAGE_SIZE as usize - HEADER_SIZE) / size_of::(); + + let header = hvdef::hypercall::ModifyVtlProtectionMask { + partition_id: hvdef::HV_PARTITION_ID_SELF, + map_flags: hvdef::HV_MAP_GPA_PERMISSIONS_NONE, + target_vtl: HvInputVtl::new() + .with_target_vtl_value(vtl.into()) + .with_use_target_vtl(true), + reserved: [0; 3], + }; + + let mut current_page = range.start_4k_gpn(); + while current_page < range.end_4k_gpn() { + let remaining_pages = range.end_4k_gpn() - current_page; + let count = 
remaining_pages.min(MAX_INPUT_ELEMENTS as u64); + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + for i in 0..count { + let page_num = current_page + i; + let _ = page_num.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + input_offset += size_of::(); + } + + let output = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallModifyVtlProtectionMask, + Some(count as usize), + ); + + output.result()?; + + current_page += count; + } + + Ok(()) + } + + /// Makes a hypercall. + /// rep_count is Some for rep hypercalls + fn dispatch_hvcall( + &mut self, + code: hvdef::HypercallCode, + rep_count: Option, + ) -> hvdef::hypercall::HypercallOutput { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(code.0) + .with_rep_count(rep_count.unwrap_or_default()); + + // SAFETY: Invoking hypercall per TLFS spec + unsafe { + invoke_hypercall( + control, + self.input_page().address(), + self.output_page().address(), + ) + } + } + + /// Enables a VTL for the specified partition. + pub fn enable_partition_vtl( + &mut self, + partition_id: u64, + target_vtl: Vtl, + ) -> Result<(), hvdef::HvError> { + let flags: EnablePartitionVtlFlags = EnablePartitionVtlFlags::new() + .with_enable_mbec(false) + .with_enable_supervisor_shadow_stack(false); + + let header = hvdef::hypercall::EnablePartitionVtl { + partition_id, + target_vtl: target_vtl.into(), + flags, + reserved_z0: 0, + reserved_z1: 0, + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnablePartitionVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } + } + + /// Enables VTL protection for the specified VTL. 
+ pub fn enable_vtl_protection(&mut self, vtl: HvInputVtl) -> Result<(), hvdef::HvError> { + // let hvreg = self.get_register(HvX64RegisterName::VsmPartitionConfig.into(), Some(vtl))?; + let mut hvreg: HvRegisterVsmPartitionConfig = HvRegisterVsmPartitionConfig::new(); + hvreg.set_enable_vtl_protection(true); + // hvreg.set_intercept_page(true); + hvreg.set_default_vtl_protection_mask(0xF); + // hvreg.set_intercept_enable_vtl_protection(true); + let bits = hvreg.into_bits(); + let hvre: HvRegisterValue = HvRegisterValue::from(bits); + self.set_register( + HvX64RegisterName::VsmPartitionConfig.into(), + hvre, + Some(vtl), + ) + } + + #[cfg(target_arch = "x86_64")] + /// Enables a VTL for a specific virtual processor (VP) on x86_64. + pub fn enable_vp_vtl( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::EnableVpVtlX64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + target_vtl: target_vtl.into(), + reserved: [0; 3], + vp_vtl_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + }; + + header + .write_to_prefix(self.input_page().buffer.as_mut_slice()) + .expect("size of enable_vp_vtl header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } + } + + /// Hypercall to enable VP VTL + #[cfg(target_arch = "aarch64")] + pub fn enable_vp_vtl(&mut self, vp_index: u32) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::EnableVpVtlArm64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + // The VTL value here is just a u8 and not the otherwise usual + // HvInputVtl value. 
+ target_vtl: Vtl::Vtl2.into(), + reserved: [0; 3], + vp_vtl_context: zerocopy::FromZeroes::new_zeroed(), + }; + + header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallEnableVpVtl, None); + match output.result() { + Ok(()) | Err(hvdef::HvError::VtlAlreadyEnabled) => Ok(()), + err => err, + } + } + + fn get_segment_descriptor(segment_reg: &str) -> HvX64SegmentRegister { + unsafe { + use core::arch::asm; + let mut descriptor = HvX64SegmentRegister { + base: 0, + limit: 0, + selector: 0, + attributes: 0, + }; + match segment_reg { + "cs" => { + asm!("mov {0:x}, cs", out(reg) descriptor.selector, options(nomem, nostack)) + } + "ds" => { + asm!("mov {0:x}, ds", out(reg) descriptor.selector, options(nomem, nostack)) + } + "es" => { + asm!("mov {0:x}, es", out(reg) descriptor.selector, options(nomem, nostack)) + } + "ss" => { + asm!("mov {0:x}, ss", out(reg) descriptor.selector, options(nomem, nostack)) + } + "fs" => { + asm!("mov {0:x}, fs", out(reg) descriptor.selector, options(nomem, nostack)) + } + "gs" => { + asm!("mov {0:x}, gs", out(reg) descriptor.selector, options(nomem, nostack)) + } + "tr" => asm!("str {0:x}", out(reg) descriptor.selector, options(nomem, nostack)), + _ => panic!("Invalid segment register"), + } + + // For FS and GS in 64-bit mode, we can get the base directly via MSRs + if segment_reg == "fs" { + let mut base_low: u32; + let mut base_high: u32; + asm!( + "mov ecx, 0xC0000100", // FS_BASE MSR + "rdmsr", + out("eax") base_low, + out("edx") base_high, + options(nomem, nostack) + ); + descriptor.base = ((base_high as u64) << 32) | (base_low as u64); + } else if segment_reg == "gs" { + let mut base_low: u32; + let mut base_high: u32; + asm!( + "mov ecx, 0xC0000101", // GS_BASE MSR + "rdmsr", + out("eax") base_low, + out("edx") base_high, + options(nomem, nostack) + ); + descriptor.base = ((base_high as u64) << 32) | (base_low as u64); + } else { + // For other segments, 
need to look up in GDT/LDT + // Allocate 10 bytes for storing GDTR/LDTR content + let mut descriptor_table = [0u8; 10]; + + // Determine if selector is in GDT or LDT + let table_indicator = descriptor.selector & 0x04; + + if table_indicator == 0 { + // Get GDT base + asm!("sgdt [{}]", in(reg) descriptor_table.as_mut_ptr(), options(nostack)); + } else { + // Get LDT base + asm!("sldt [{}]", in(reg) descriptor_table.as_mut_ptr(), options(nostack)); + } + + // Extract GDT/LDT base (bytes 2-9 of descriptor_table) + let table_base = u64::from_ne_bytes([ + descriptor_table[2], + descriptor_table[3], + descriptor_table[4], + descriptor_table[5], + descriptor_table[6], + descriptor_table[7], + descriptor_table[8], + descriptor_table[9], + ]); + + // Calculate descriptor entry address + let index = (descriptor.selector & 0xFFF8) as u64; // Clear RPL and TI bits + let desc_addr = table_base + index; + + // Read the 8-byte descriptor + let desc_bytes = alloc::slice::from_raw_parts(desc_addr as *const u8, 8); + let desc_low = u32::from_ne_bytes([ + desc_bytes[0], + desc_bytes[1], + desc_bytes[2], + desc_bytes[3], + ]); + let desc_high = u32::from_ne_bytes([ + desc_bytes[4], + desc_bytes[5], + desc_bytes[6], + desc_bytes[7], + ]); + + // Extract base (bits 16-39 and 56-63) + let base_low = ((desc_low >> 16) & 0xFFFF) as u64; + let base_mid = (desc_high & 0xFF) as u64; + let base_high = ((desc_high >> 24) & 0xFF) as u64; + descriptor.base = base_low | (base_mid << 16) | (base_high << 24); + + // Extract limit (bits 0-15 and 48-51) + let limit_low = desc_low & 0xFFFF; + let limit_high = (desc_high >> 16) & 0x0F; + descriptor.limit = limit_low | (limit_high << 16); + + // Extract attributes (bits 40-47 and 52-55) + let attr_low = (desc_high >> 8) & 0xFF; + let attr_high = (desc_high >> 20) & 0x0F; + descriptor.attributes = (attr_low as u16) | ((attr_high as u16) << 8); + + // If G bit is set (bit 55), the limit is in 4K pages + if (desc_high & 0x00800000) != 0 { + descriptor.limit 
= (descriptor.limit << 12) | 0xFFF; + } + + // For TR, which is a system segment in 64-bit mode, read the second 8 bytes to get the high 32 bits of base + if segment_reg == "tr" { + // Check if it's a system descriptor (bit 4 of attributes is 0) + if (descriptor.attributes & 0x10) == 0 { + // Read the next 8 bytes of the descriptor (high part of 16-byte descriptor) + let high_desc_bytes = + alloc::slice::from_raw_parts((desc_addr + 8) as *const u8, 8); + let high_base = u32::from_ne_bytes([ + high_desc_bytes[0], + high_desc_bytes[1], + high_desc_bytes[2], + high_desc_bytes[3], + ]) as u64; + + // Combine with existing base to get full 64-bit base + descriptor.base |= high_base << 32; + } + } + } + + descriptor + } + } + + #[cfg(target_arch = "x86_64")] + /// Hypercall to get the current VTL VP context + pub fn get_current_vtl_vp_context(&mut self) -> Result { + use minimal_rt::arch::msr::read_msr; + use zerocopy::FromZeros; + let mut context: InitialVpContextX64 = FromZeros::new_zeroed(); + + let rsp: u64; + unsafe { asm!("mov {0:r}, rsp", out(reg) rsp, options(nomem, nostack)) }; + + let cr0; + unsafe { asm!("mov {0:r}, cr0", out(reg) cr0, options(nomem, nostack)) }; + let cr3; + unsafe { asm!("mov {0:r}, cr3", out(reg) cr3, options(nomem, nostack)) }; + let cr4; + unsafe { asm!("mov {0:r}, cr4", out(reg) cr4, options(nomem, nostack)) }; + + let rflags: u64; + unsafe { + asm!( + "pushfq", + "pop {0}", + out(reg) rflags, + ); + } + + context.cr0 = cr0; + context.cr3 = cr3; + context.cr4 = cr4; + + context.rsp = rsp; + context.rip = 0; + + context.rflags = rflags; + + // load segment registers + + let cs: u16; + let ss: u16; + let ds: u16; + let es: u16; + let fs: u16; + let gs: u16; + + unsafe { + asm!(" + mov {0:x}, cs + mov {1:x}, ss + mov {2:x}, ds + mov {3:x}, es + mov {4:x}, fs + mov {5:x}, gs + ", out(reg) cs, out(reg) ss, out(reg) ds, out(reg) es, out(reg) fs, out(reg) gs, options(nomem, nostack)) + } + + context.cs.selector = cs; + context.cs.attributes = 
0xA09B; + context.cs.limit = 0xFFFFFFFF; + + context.ss.selector = ss; + context.ss.attributes = 0xC093; + context.ss.limit = 0xFFFFFFFF; + + context.ds.selector = ds; + context.ds.attributes = 0xC093; + context.ds.limit = 0xFFFFFFFF; + + context.es.selector = es; + context.es.attributes = 0xC093; + context.es.limit = 0xFFFFFFFF; + + context.fs.selector = fs; + context.fs.attributes = 0xC093; + context.fs.limit = 0xFFFFFFFF; + + context.gs.selector = gs; + context.gs.attributes = 0xC093; + context.gs.limit = 0xFFFFFFFF; + + context.tr.selector = 0; + context.tr.attributes = 0x8B; + context.tr.limit = 0xFFFF; + + let idt = x86_64::instructions::tables::sidt(); + context.idtr.base = idt.base.as_u64(); + context.idtr.limit = idt.limit; + + let gdtr = x86_64::instructions::tables::sgdt(); + context.gdtr.base = gdtr.base.as_u64(); + context.gdtr.limit = gdtr.limit; + + let efer = unsafe { read_msr(0xC0000080) }; + context.efer = efer; + + log::info!("Current VTL VP context: {:?}", context); + Ok(context) + } + + /// Hypercall for setting a register to a value. + pub fn get_register( + &mut self, + name: hvdef::HvRegisterName, + vtl: Option, + ) -> Result { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = name.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallGetVpRegisters, Some(1)); + output.result()?; + let value = HvRegisterValue::read_from_prefix(&self.output_page().buffer).unwrap(); + + Ok(value.0) + } + + /// Get the corresponding VP indices from a list of VP hardware IDs (APIC + /// IDs on x64, MPIDR on ARM64). 
+ /// + /// This always queries VTL0, since the hardware IDs are the same across the + /// VTLs in practice, and the hypercall only succeeds for VTL2 once VTL2 has + /// been enabled (which it might not be at this point). + pub fn get_vp_index_from_hw_id( + &mut self, + hw_ids: &[HwId], + output: &mut ArrayVec, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::GetVpIndexFromApicId { + partition_id: hvdef::HV_PARTITION_ID_SELF, + target_vtl: 0, + reserved: [0; 7], + }; + + // Split the call up to avoid exceeding the hypercall input/output size limits. + const MAX_PER_CALL: usize = 512; + + for hw_ids in hw_ids.chunks(MAX_PER_CALL) { + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + let _ = + hw_ids.write_to_prefix(&mut self.input_page().buffer[header.as_bytes().len()..]); + + // SAFETY: The input header and rep slice are the correct types for this hypercall. + // The hypercall output is validated right after the hypercall is issued. + let r = self.dispatch_hvcall( + hvdef::HypercallCode::HvCallGetVpIndexFromApicId, + Some(hw_ids.len()), + ); + + let n = r.elements_processed(); + + output.extend( + <[u32]>::ref_from_bytes(&mut self.output_page().buffer[..n * 4]) + .unwrap() + .iter() + .copied(), + ); + r.result()?; + assert_eq!(n, hw_ids.len()); + } + + Ok(()) + } + + /// Initializes the hypercall interface. + pub fn initialize(&mut self) { + // TODO: revisit os id value. For now, use 1 (which is what UEFI does) + let guest_os_id = hvdef::hypercall::HvGuestOsMicrosoft::new().with_os_id(1); + // This is an idempotent operation, so we can call it multiple times. + // we proceed and initialize the hypercall interface because we don't know the current vtl + // This prohibit us to call this selectively for new VTLs + crate::arch::hypercall::initialize(guest_os_id.into()); + + HV_PAGE_INIT_STATUS.fetch_add(1, Ordering::SeqCst); + } + + /// Returns a mutable reference to the hypercall input page. 
+ fn input_page(&mut self) -> &mut HvcallPage { + &mut self.input_page + } + + /// Creates a new `HvCall` instance. + pub const fn new() -> Self { + HvCall { + input_page: HvcallPage::new(), + output_page: HvcallPage::new(), + } + } + + /// Returns a mutable reference to the hypercall output page. + fn output_page(&mut self) -> &mut HvcallPage { + &mut self.output_page + } + + /// Hypercall for setting a register to a value. + pub fn set_register( + &mut self, + name: hvdef::HvRegisterName, + value: HvRegisterValue, + vtl: Option, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: hvdef::HV_VP_INDEX_SELF, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let reg = hvdef::hypercall::HvRegisterAssoc { + name, + pad: Default::default(), + value, + }; + + let _ = reg.write_to_prefix(&mut self.input_page().buffer[HEADER_SIZE..]); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(1)); + + output.result() + } + + /// Sets multiple virtual processor (VP) registers for a given VP and VTL. 
+ pub fn set_vp_registers( + &mut self, + vp: u32, + vtl: Option, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + const HEADER_SIZE: usize = size_of::(); + + let header = hvdef::hypercall::GetSetVpRegisters { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index: vp, + target_vtl: vtl.unwrap_or(HvInputVtl::CURRENT_VTL), + rsvd: [0; 3], + }; + + let _ = header.write_to_prefix(self.input_page().buffer.as_mut_slice()); + + let mut input_offset = HEADER_SIZE; + + let mut count = 0; + let mut write_reg = |reg_name: hvdef::HvRegisterName, reg_value: HvRegisterValue| { + let reg = hvdef::hypercall::HvRegisterAssoc { + name: reg_name, + pad: Default::default(), + value: reg_value, + }; + + let _ = reg.write_to_prefix(&mut self.input_page().buffer[input_offset..]); + + input_offset += size_of::(); + count += 1; + }; + // pub msr_cr_pat: u64, + + write_reg( + HvX64RegisterName::Cr0.into(), + vp_context.unwrap().cr0.into(), + ); + write_reg( + HvX64RegisterName::Cr3.into(), + vp_context.unwrap().cr3.into(), + ); + write_reg( + HvX64RegisterName::Cr4.into(), + vp_context.unwrap().cr4.into(), + ); + write_reg( + HvX64RegisterName::Rip.into(), + vp_context.unwrap().rip.into(), + ); + write_reg( + HvX64RegisterName::Rsp.into(), + vp_context.unwrap().rsp.into(), + ); + write_reg( + HvX64RegisterName::Rflags.into(), + vp_context.unwrap().rflags.into(), + ); + write_reg(HvX64RegisterName::Cs.into(), vp_context.unwrap().cs.into()); + write_reg(HvX64RegisterName::Ss.into(), vp_context.unwrap().ss.into()); + write_reg(HvX64RegisterName::Ds.into(), vp_context.unwrap().ds.into()); + write_reg(HvX64RegisterName::Es.into(), vp_context.unwrap().es.into()); + write_reg(HvX64RegisterName::Fs.into(), vp_context.unwrap().fs.into()); + write_reg(HvX64RegisterName::Gs.into(), vp_context.unwrap().gs.into()); + write_reg( + HvX64RegisterName::Gdtr.into(), + vp_context.unwrap().gdtr.into(), + ); + write_reg( + HvX64RegisterName::Idtr.into(), + vp_context.unwrap().idtr.into(), + ); + 
write_reg( + HvX64RegisterName::Ldtr.into(), + vp_context.unwrap().ldtr.into(), + ); + write_reg(HvX64RegisterName::Tr.into(), vp_context.unwrap().tr.into()); + write_reg( + HvX64RegisterName::Efer.into(), + vp_context.unwrap().efer.into(), + ); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallSetVpRegisters, Some(count)); + + output.result() + } + + #[cfg(target_arch = "x86_64")] + /// Starts a virtual processor (VP) with the specified VTL and context on x86_64. + pub fn start_virtual_processor( + &mut self, + vp_index: u32, + target_vtl: Vtl, + vp_context: Option, + ) -> Result<(), hvdef::HvError> { + let header = hvdef::hypercall::StartVirtualProcessorX64 { + partition_id: hvdef::HV_PARTITION_ID_SELF, + vp_index, + target_vtl: target_vtl.into(), + vp_context: vp_context.unwrap_or(zerocopy::FromZeros::new_zeroed()), + rsvd0: 0u8, + rsvd1: 0u16, + }; + + header + .write_to_prefix(self.input_page().buffer.as_mut_slice()) + .expect("size of start_virtual_processor header is not correct"); + + let output = self.dispatch_hvcall(hvdef::HypercallCode::HvCallStartVirtualProcessor, None); + match output.result() { + Ok(()) => Ok(()), + err => panic!("Failed to start virtual processor: {:?}", err), + } + } + + /// Call before jumping to kernel. + pub fn uninitialize(&mut self) { + crate::arch::hypercall::uninitialize(); + } + + /// Returns the environment's VTL. + pub fn vtl(&mut self) -> Vtl { + self.get_register(hvdef::HvAllArchRegisterName::VsmVpStatus.into(), None) + .map_or(Vtl::Vtl0, |status| { + hvdef::HvRegisterVsmVpStatus::from(status.as_u64()) + .active_vtl() + .try_into() + .unwrap() + }) + } + + #[inline(never)] + /// Invokes the HvCallVtlCall hypercall. + pub fn vtl_call() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlCall.0) + .with_rep_count(0); + invoke_hypercall_vtl(control); + } + + #[inline(never)] + /// Invokes the HvCallVtlReturn hypercall. 
+ pub fn vtl_return() { + let control: hvdef::hypercall::Control = hvdef::hypercall::Control::new() + .with_code(hvdef::HypercallCode::HvCallVtlReturn.0) + .with_rep_count(0); + invoke_hypercall_vtl(control); + } +} + +/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is the +/// APIC ID on x64. +#[cfg(target_arch = "x86_64")] +pub type HwId = u32; + +/// The "hardware ID" used for [`HvCall::get_vp_index_from_hw_id`]. This is the +/// MPIDR on ARM64. +#[cfg(target_arch = "aarch64")] +pub type HwId = u64; + +impl Drop for HvCall { + fn drop(&mut self) { + let seq = HV_PAGE_INIT_STATUS.fetch_sub(1, Ordering::SeqCst); + if seq == 0 { + self.uninitialize(); + } + } +} diff --git a/opentmk/opentmk/src/main.rs b/opentmk/opentmk/src/main.rs new file mode 100644 index 0000000000..0a0f6f0e9a --- /dev/null +++ b/opentmk/opentmk/src/main.rs @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +#![no_std] +#![allow(unsafe_code)] +#![feature(abi_x86_interrupt)] +#![doc = include_str!("../README.md")] +#![cfg_attr(all(not(test), target_os = "uefi"), no_main)] +#![cfg_attr(all(not(test), target_os = "uefi"), no_std)] + +// Actual entrypoint is `uefi::uefi_main`, via the `#[entry]` macro +#[cfg(any(test, not(target_os = "uefi")))] +fn main() {} + +#[macro_use] +extern crate alloc; + +pub mod arch; +pub mod context; +pub mod devices; +pub mod hypercall; +pub mod platform; +pub mod tests; +pub mod tmk_assert; +pub mod tmk_logger; +pub mod tmkdefs; +mod uefi; diff --git a/opentmk/opentmk/src/platform/hypvctx.rs b/opentmk/opentmk/src/platform/hypvctx.rs new file mode 100644 index 0000000000..0c1114c957 --- /dev/null +++ b/opentmk/opentmk/src/platform/hypvctx.rs @@ -0,0 +1,756 @@ +use alloc::alloc::alloc; +use alloc::boxed::Box; +use alloc::collections::btree_map::BTreeMap; +use alloc::collections::btree_set::BTreeSet; +use alloc::collections::linked_list::LinkedList; +use core::alloc::Layout; +use core::arch::asm; +use 
core::fmt::Display; +use core::ops::Range; + +use hvdef::hypercall::HvInputVtl; +use hvdef::hypercall::InitialVpContextX64; +use hvdef::AlignedU128; +use hvdef::Vtl; +use memory_range::MemoryRange; +use minimal_rt::arch::msr::read_msr; +use minimal_rt::arch::msr::write_msr; +use sync_nostd::Mutex; + +use crate::context::InterruptPlatformTrait; +use crate::context::MsrPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecutor; +use crate::context::VtlPlatformTrait; +use crate::hypercall::HvCall; +use crate::tmkdefs::TmkError; +use crate::tmkdefs::TmkErrorType; +use crate::tmkdefs::TmkResult; + +const ALIGNMENT: usize = 4096; + +type ComandTable = BTreeMap, Vtl)>>; +static mut CMD: Mutex = Mutex::new(BTreeMap::new()); + +#[allow(static_mut_refs)] +fn cmdt() -> &'static Mutex { + unsafe { &CMD } +} + +fn register_command_queue(vp_index: u32) { + log::debug!("registering command queue for vp: {}", vp_index); + if cmdt().lock().get(&vp_index).is_none() { + cmdt().lock().insert(vp_index, LinkedList::new()); + log::debug!("registered command queue for vp: {}", vp_index); + } else { + log::debug!("command queue already registered for vp: {}", vp_index); + } +} + +pub struct HvTestCtx { + pub hvcall: HvCall, + // BUG: make this static + pub vp_runing: BTreeSet, + pub my_vp_idx: u32, + pub my_vtl: Vtl, +} + +impl Display for HvTestCtx { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "HvTestCtx {{ vp_idx: {}, vtl: {:?} }}", + self.my_vp_idx, self.my_vtl + ) + } +} + +impl SecureInterceptPlatformTrait for HvTestCtx { + /// Configure the Secure Interrupt Message Page (SIMP) and the first + /// SynIC interrupt (SINT0) so that the hypervisor can vector + /// hypervisor side notifications back to the guest. + /// Returns [`TmkResult::Err`] if the allocation of the SIMP buffer fails. 
+ fn setup_secure_intercept(&mut self, interrupt_idx: u8) -> TmkResult<()> { + let layout = Layout::from_size_align(4096, ALIGNMENT) + .map_err(|_| TmkError(TmkErrorType::AllocationFailed))?; + + let ptr = unsafe { alloc(layout) }; + let gpn = (ptr as u64) >> 12; + let reg = (gpn << 12) | 0x1; + + unsafe { write_msr(hvdef::HV_X64_MSR_SIMP, reg) }; + log::info!("Successfuly set the SIMP register."); + + let reg = unsafe { read_msr(hvdef::HV_X64_MSR_SINT0) }; + let mut reg: hvdef::HvSynicSint = reg.into(); + reg.set_vector(interrupt_idx); + reg.set_masked(false); + reg.set_auto_eoi(true); + + self.write_msr(hvdef::HV_X64_MSR_SINT0, reg.into())?; + log::info!("Successfuly set the SINT0 register."); + Ok(()) + } +} + +impl InterruptPlatformTrait for HvTestCtx { + /// Install an interrupt handler for the supplied vector on x86-64. + /// For non-x86-64 targets the call returns + /// [`TmkErrorType::NotImplemented`]. + fn set_interrupt_idx(&mut self, interrupt_idx: u8, handler: fn()) -> TmkResult<()> { + #[cfg(target_arch = "x86_64")] + { + crate::arch::interrupt::set_handler(interrupt_idx, handler); + Ok(()) + } + + #[cfg(not(target_arch = "x86_64"))] + { + Err(TmkError(TmkErrorType::NotImplemented)) + } + } + + /// Initialise the minimal in-guest interrupt infrastructure + /// (IDT/GIC, etc. depending on architecture). + fn setup_interrupt_handler(&mut self) -> TmkResult<()> { + crate::arch::interrupt::init(); + Ok(()) + } +} + +impl MsrPlatformTrait for HvTestCtx { + /// Read an MSR directly from the CPU and return the raw value. + fn read_msr(&mut self, msr: u32) -> TmkResult { + let r = unsafe { read_msr(msr) }; + Ok(r) + } + + /// Write an MSR directly on the CPU. + fn write_msr(&mut self, msr: u32, value: u64) -> TmkResult<()> { + unsafe { write_msr(msr, value) }; + Ok(()) + } +} + +impl VirtualProcessorPlatformTrait for HvTestCtx { + /// Fetch the content of the specified architectural register from + /// the current VTL for the executing VP. 
+ fn get_register(&mut self, reg: u32) -> TmkResult { + #[cfg(target_arch = "x86_64")] + { + use hvdef::HvX64RegisterName; + let reg = HvX64RegisterName(reg); + let val = self.hvcall.get_register(reg.into(), None)?.as_u128(); + Ok(val) + } + + #[cfg(target_arch = "aarch64")] + { + use hvdef::HvAarch64RegisterName; + let reg = HvAarch64RegisterName(reg); + let val = self.hvcall.get_register(reg.into(), None)?.as_u128(); + Ok(val) + } + + #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))] + { + Err(TmkError(TmkErrorType::NotImplemented)) + } + } + + /// Return the number of logical processors present in the machine + /// by issuing the `cpuid` leaf 1 call on x86-64. + fn get_vp_count(&self) -> TmkResult { + #[cfg(target_arch = "x86_64")] + { + Ok(4) + } + + #[cfg(not(target_arch = "x86_64"))] + { + Err(TmkError(TmkErrorType::NotImplemented)) + } + } + + /// Push a command onto the per-VP linked-list so it will be executed + /// by the busy-loop running in `exec_handler`. No scheduling happens + /// here – we simply enqueue. + fn queue_command_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> { + let (vp_index, vtl, cmd) = cmd.get(); + let cmd = cmd.ok_or(TmkError(TmkErrorType::QueueCommandFailed))?; + cmdt() + .lock() + .get_mut(&vp_index) + .unwrap() + .push_back((cmd, vtl)); + Ok(()) + } + + #[inline(never)] + /// Ensure the target VP is running in the requested VTL and queue + /// the command for execution. + /// – If the VP is not yet running, it is started with a default + /// context. + /// – If the command targets a different VTL than the current one, + /// control is switched via `vtl_call` / `vtl_return` so that the + /// executor loop can pick the command up. + /// in short every VP acts as an executor engine and + /// spins in `exec_handler` waiting for work. 
+ fn start_on_vp(&mut self, cmd: VpExecutor) -> TmkResult<()> { + let (vp_index, vtl, cmd) = cmd.get(); + let cmd = cmd.ok_or(TmkError(TmkErrorType::InvalidParameter))?; + if vtl >= Vtl::Vtl2 { + return Err(TmkError(TmkErrorType::InvalidParameter)); + } + let is_vp_running = self.vp_runing.get(&vp_index); + if let Some(_running_vtl) = is_vp_running { + log::debug!("both vtl0 and vtl1 are running for VP: {:?}", vp_index); + } else { + if vp_index == 0 { + let vp_context = self.get_default_context(Vtl::Vtl1)?; + self.hvcall.enable_vp_vtl(0, Vtl::Vtl1, Some(vp_context))?; + + cmdt().lock().get_mut(&vp_index).unwrap().push_back(( + Box::new(move |ctx| { + ctx.switch_to_low_vtl(); + }), + Vtl::Vtl1, + )); + log::info!("self addr: {:p}", self as *const _); + self.switch_to_high_vtl(); + log::info!("self addr after switch: {:p}", self as *const _); + self.vp_runing.insert(vp_index); + } else { + let (tx, rx) = sync_nostd::Channel::>::new().split(); + let self_vp_idx = self.my_vp_idx; + cmdt().lock().get_mut(&self_vp_idx).unwrap().push_back(( + Box::new(move |ctx| { + log::debug!("starting VP{} in VTL1 of vp{}", vp_index, self_vp_idx); + let r = ctx.enable_vp_vtl_with_default_context(vp_index, Vtl::Vtl1); + if r.is_err() { + log::error!("failed to enable VTL1 for VP{}: {:?}", vp_index, r); + let _ = tx.send(r); + return; + } + log::debug!("successfully enabled VTL1 for VP{}", vp_index); + let r = ctx.start_running_vp_with_default_context(VpExecutor::new( + vp_index, + Vtl::Vtl0, + )); + if r.is_err() { + log::error!("failed to start VP{}: {:?}", vp_index, r); + let _ = tx.send(r); + return; + } + log::debug!("successfully started VP{}", vp_index); + let _ = tx.send(Ok(())); + ctx.switch_to_low_vtl(); + }), + Vtl::Vtl1, + )); + self.switch_to_high_vtl(); + let rx = rx.recv(); + if let Ok(r) = rx { + r?; + } + self.vp_runing.insert(vp_index); + } + } + cmdt() + .lock() + .get_mut(&vp_index) + .unwrap() + .push_back((cmd, vtl)); + + if vp_index == self.my_vp_idx && 
self.my_vtl != vtl { + if vtl == Vtl::Vtl0 { + self.switch_to_low_vtl(); + } else { + self.switch_to_high_vtl(); + } + } + Ok(()) + } + + /// Start the given VP in the current VTL using a freshly captured + /// context and *do not* queue any additional work. + fn start_running_vp_with_default_context( + &mut self, + cmd: VpExecutor, + ) -> TmkResult<()> { + let (vp_index, vtl, _cmd) = cmd.get(); + let vp_ctx: InitialVpContextX64 = self.get_default_context(vtl)?; + self.hvcall + .start_virtual_processor(vp_index, vtl, Some(vp_ctx))?; + Ok(()) + } + + /// Return the index of the VP that is currently executing this code. + fn get_current_vp(&self) -> TmkResult { + Ok(self.my_vp_idx) + } +} + +impl VtlPlatformTrait for HvTestCtx { + /// Apply VTL protections to the supplied GPA range so that only the + /// provided VTL can access it. + fn apply_vtl_protection_for_memory(&mut self, range: Range, vtl: Vtl) -> TmkResult<()> { + self.hvcall + .apply_vtl_protections(MemoryRange::new(range), vtl)?; + Ok(()) + } + + /// Enable the specified VTL on a VP and seed it with a default + /// context captured from the current execution environment. + fn enable_vp_vtl_with_default_context(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { + let vp_ctx = self.get_default_context(vtl)?; + self.hvcall.enable_vp_vtl(vp_index, vtl, Some(vp_ctx))?; + Ok(()) + } + + /// Return the VTL in which the current code is running. + fn get_current_vtl(&self) -> TmkResult { + Ok(self.my_vtl) + } + + /// Inject a default context into an already existing VP/VTL pair. 
+ fn set_default_ctx_to_vp(&mut self, vp_index: u32, vtl: Vtl) -> TmkResult<()> { + let i: u8 = match vtl { + Vtl::Vtl0 => 0, + Vtl::Vtl1 => 1, + Vtl::Vtl2 => 2, + }; + let vp_context = self.get_default_context(vtl)?; + self.hvcall.set_vp_registers( + vp_index, + Some( + HvInputVtl::new() + .with_target_vtl_value(i) + .with_use_target_vtl(true), + ), + Some(vp_context), + )?; + Ok(()) + } + + /// Enable VTL support for the entire partition. + fn setup_partition_vtl(&mut self, vtl: Vtl) -> TmkResult<()> { + self.hvcall + .enable_partition_vtl(hvdef::HV_PARTITION_ID_SELF, vtl)?; + log::info!("enabled vtl protections for the partition."); + Ok(()) + } + + /// Turn on VTL protections for the currently running VTL. + fn setup_vtl_protection(&mut self) -> TmkResult<()> { + self.hvcall.enable_vtl_protection(HvInputVtl::CURRENT_VTL)?; + log::info!("enabled vtl protections for the partition."); + Ok(()) + } + + /// Switch execution from the current (low) VTL to the next higher + /// one (`vtl_call`). 
+ #[inline(never)] + fn switch_to_high_vtl(&mut self) { + unsafe { + asm!( + " + push rax + push rbx + push rcx + push rdx + push rdi + push rsi + push rbp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + call {call_address} + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rbp + pop rsi + pop rdi + pop rdx + pop rcx + pop rbx + pop rax", + call_address = sym HvCall::vtl_call, + ); + } + + // let reg = self + // .get_register(hvdef::HvAllArchRegisterName::VsmCodePageOffsets.0) + // .unwrap(); + // let reg = HvRegisterValue::from(reg); + // let offset = hvdef::HvRegisterVsmCodePageOffsets::from_bits(reg.as_u64()); + + // log::debug!("call_offset: {:?}", offset); + + // let call_offset = offset.call_offset(); + // unsafe { + // let call_address = &raw const HYPERCALL_PAGE as *const u8; + // let off_addr = call_address.add(call_offset.into()) as u64; + // asm!( + // " + // call {call_address}", + // in("rcx") 0x0, + // call_address = in(reg) off_addr, + // ); + // } + } + + /// Return from a high VTL back to the low VTL (`vtl_return`). 
+ #[inline(never)] + fn switch_to_low_vtl(&mut self) { + // HvCall::vtl_return(); + unsafe { + asm!( + " + push rax + push rbx + push rcx + push rdx + push rdi + push rsi + push rbp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + call {call_address} + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rbp + pop rsi + pop rdi + pop rdx + pop rcx + pop rbx + pop rax", + call_address = sym HvCall::vtl_return, + ); + } + // let reg = self + // .get_register(hvdef::HvAllArchRegisterName::VsmCodePageOffsets.0) + // .unwrap(); + // let reg = HvRegisterValue::from(reg); + // let offset = hvdef::HvRegisterVsmCodePageOffsets::from_bits(reg.as_u64()); + + // let call_offset = offset.return_offset(); + // unsafe { + // let call_address = &raw const HYPERCALL_PAGE as *const u8; + // let off_addr = call_address.add(call_offset.into()) as u64; + // asm!( + // " + // call {call_address}", + // in("rcx") 0x0, + // call_address = in(reg) off_addr, + // ); + // } + } + + fn set_vp_state_with_vtl( + &mut self, + register_index: u32, + value: u64, + vtl: Vtl, + ) -> TmkResult<()> { + let vtl = vtl_transform(vtl); + let value = AlignedU128::from(value); + let reg_value = hvdef::HvRegisterValue(value); + self.hvcall + .set_register(hvdef::HvRegisterName(register_index), reg_value, Some(vtl)) + .map_err(|e| e.into()) + } + + fn get_vp_state_with_vtl(&mut self, register_index: u32, vtl: Vtl) -> TmkResult { + let vtl = vtl_transform(vtl); + self.hvcall + .get_register(hvdef::HvRegisterName(register_index), Some(vtl)) + .map(|v| v.as_u64()) + .map_err(|e| e.into()) + } +} + +fn vtl_transform(vtl: Vtl) -> HvInputVtl { + let vtl = match vtl { + Vtl::Vtl0 => 0, + Vtl::Vtl1 => 1, + Vtl::Vtl2 => 2, + }; + HvInputVtl::new() + .with_target_vtl_value(vtl) + .with_use_target_vtl(true) +} + +impl HvTestCtx { + /// Construct an *un-initialised* test context. + /// Call [`HvTestCtx::init`] before using the value. 
+ pub const fn new() -> Self { + HvTestCtx { + hvcall: HvCall::new(), + vp_runing: BTreeSet::new(), + my_vp_idx: 0, + my_vtl: Vtl::Vtl0, + } + } + + /// Perform the one-time initialisation sequence: + /// – initialise the hypercall page, + /// – discover the VP count and create command queues, + /// – record the current VTL. + pub fn init(&mut self, vtl: Vtl) -> TmkResult<()> { + self.hvcall.initialize(); + let vp_count = self.get_vp_count()?; + for i in 0..vp_count { + register_command_queue(i); + } + self.my_vtl = vtl; + // let reg = self + // .hvcall + // .get_register(hvdef::HvAllArchRegisterName::VpIndex.into(), None) + // .expect("error: failed to get vp index"); + // let reg = reg.as_u64(); + // self.my_vp_idx = reg as u32; + + let result = unsafe { core::arch::x86_64::__cpuid(0x1) }; + self.my_vp_idx = (result.ebx >> 24) & 0xFF; + Ok(()) + } + + fn secure_exec_handler() { + HvTestCtx::exec_handler(Vtl::Vtl1); + } + + fn general_exec_handler() { + HvTestCtx::exec_handler(Vtl::Vtl0); + } + + /// Busy-loop executor that runs on every VP. + /// Extracts commands from the per-VP queue and executes them in the + /// appropriate VTL, switching VTLs when necessary. 
+ fn exec_handler(vtl: Vtl) { + let mut ctx = HvTestCtx::new(); + ctx.init(vtl).expect("error: failed to init on a VP"); + + ctx.print_rbp(); + ctx.print_rsp(); + + loop { + let mut vtl: Option = None; + let mut cmd: Option> = None; + + { + let mut cmdt = cmdt().lock(); + let d = cmdt.get_mut(&ctx.my_vp_idx); + if let Some(d) = d { + if !d.is_empty() { + let (_c, v) = d.front().unwrap(); + if *v == ctx.my_vtl { + let (c, _v) = d.pop_front().unwrap(); + cmd = Some(c); + } else { + vtl = Some(*v); + } + } + } + } + + if let Some(vtl) = vtl { + if vtl == Vtl::Vtl0 { + ctx.switch_to_low_vtl(); + } else { + ctx.switch_to_high_vtl(); + } + } + + if let Some(cmd) = cmd { + cmd(&mut ctx); + } + } + } + + #[cfg(target_arch = "x86_64")] + /// Capture the current VP context, patch the entry point and stack + /// so that the new VP starts in `exec_handler`. + fn get_default_context(&mut self, vtl: Vtl) -> Result { + let handler = match vtl { + Vtl::Vtl0 => HvTestCtx::general_exec_handler, + Vtl::Vtl1 => HvTestCtx::secure_exec_handler, + _ => return Err(TmkErrorType::InvalidParameter.into()), + }; + self.run_fn_with_current_context(handler) + } + + #[cfg(target_arch = "x86_64")] + /// Helper to wrap an arbitrary function inside a captured VP context + /// that can later be used to start a new VP/VTL instance. 
+ fn run_fn_with_current_context(&mut self, func: fn()) -> Result { + let mut vp_context: InitialVpContextX64 = self + .hvcall + .get_current_vtl_vp_context() + .expect("Failed to get VTL1 context"); + let stack_layout = Layout::from_size_align(1024 * 1024, 16) + .expect("Failed to create layout for stack allocation"); + let allocated_stack_ptr = unsafe { alloc(stack_layout) }; + if allocated_stack_ptr.is_null() { + return Err(TmkErrorType::AllocationFailed.into()); + } + let stack_size = stack_layout.size(); + let stack_top = allocated_stack_ptr as u64 + stack_size as u64; + let fn_ptr = func as fn(); + let fn_address = fn_ptr as u64; + vp_context.rip = fn_address; + vp_context.rsp = stack_top; + Ok(vp_context) + } + + // function to print the current register states for x64 + #[cfg(target_arch = "x86_64")] + #[inline(always)] + pub fn print_rbp(&self) { + let rbp: u64; + unsafe { + asm!( + "mov {}, rbp", + out(reg) rbp, + ); + } + log::debug!( + "Current RBP: 0x{:#x}, VP:{} VTL:{:?}", + rbp, + self.my_vp_idx, + self.my_vtl + ); + } + + #[cfg(target_arch = "x86_64")] + #[inline(always)] + pub fn print_rsp(&self) { + let rsp: u64; + unsafe { + asm!( + "mov {}, rsp", + out(reg) rsp, + ); + } + log::debug!( + "Current RSP: 0x{:#x}, VP:{} VTL:{:?}", + rsp, + self.my_vp_idx, + self.my_vtl + ); + } +} + +impl From for TmkError { + fn from(e: hvdef::HvError) -> Self { + log::debug!("Converting hvdef::HvError::{:?} to TmkError", e); + let tmk_error_type = match e { + hvdef::HvError::InvalidHypercallCode => TmkErrorType::InvalidHypercallCode, + hvdef::HvError::InvalidHypercallInput => TmkErrorType::InvalidHypercallInput, + hvdef::HvError::InvalidAlignment => TmkErrorType::InvalidAlignment, + hvdef::HvError::InvalidParameter => TmkErrorType::InvalidParameter, + hvdef::HvError::AccessDenied => TmkErrorType::AccessDenied, + hvdef::HvError::InvalidPartitionState => TmkErrorType::InvalidPartitionState, + hvdef::HvError::OperationDenied => TmkErrorType::OperationDenied, + 
hvdef::HvError::UnknownProperty => TmkErrorType::UnknownProperty, + hvdef::HvError::PropertyValueOutOfRange => TmkErrorType::PropertyValueOutOfRange, + hvdef::HvError::InsufficientMemory => TmkErrorType::InsufficientMemory, + hvdef::HvError::PartitionTooDeep => TmkErrorType::PartitionTooDeep, + hvdef::HvError::InvalidPartitionId => TmkErrorType::InvalidPartitionId, + hvdef::HvError::InvalidVpIndex => TmkErrorType::InvalidVpIndex, + hvdef::HvError::NotFound => TmkErrorType::NotFound, + hvdef::HvError::InvalidPortId => TmkErrorType::InvalidPortId, + hvdef::HvError::InvalidConnectionId => TmkErrorType::InvalidConnectionId, + hvdef::HvError::InsufficientBuffers => TmkErrorType::InsufficientBuffers, + hvdef::HvError::NotAcknowledged => TmkErrorType::NotAcknowledged, + hvdef::HvError::InvalidVpState => TmkErrorType::InvalidVpState, + hvdef::HvError::Acknowledged => TmkErrorType::Acknowledged, + hvdef::HvError::InvalidSaveRestoreState => TmkErrorType::InvalidSaveRestoreState, + hvdef::HvError::InvalidSynicState => TmkErrorType::InvalidSynicState, + hvdef::HvError::ObjectInUse => TmkErrorType::ObjectInUse, + hvdef::HvError::InvalidProximityDomainInfo => TmkErrorType::InvalidProximityDomainInfo, + hvdef::HvError::NoData => TmkErrorType::NoData, + hvdef::HvError::Inactive => TmkErrorType::Inactive, + hvdef::HvError::NoResources => TmkErrorType::NoResources, + hvdef::HvError::FeatureUnavailable => TmkErrorType::FeatureUnavailable, + hvdef::HvError::PartialPacket => TmkErrorType::PartialPacket, + hvdef::HvError::ProcessorFeatureNotSupported => { + TmkErrorType::ProcessorFeatureNotSupported + } + hvdef::HvError::ProcessorCacheLineFlushSizeIncompatible => { + TmkErrorType::ProcessorCacheLineFlushSizeIncompatible + } + hvdef::HvError::InsufficientBuffer => TmkErrorType::InsufficientBuffer, + hvdef::HvError::IncompatibleProcessor => TmkErrorType::IncompatibleProcessor, + hvdef::HvError::InsufficientDeviceDomains => TmkErrorType::InsufficientDeviceDomains, + 
hvdef::HvError::CpuidFeatureValidationError => { + TmkErrorType::CpuidFeatureValidationError + } + hvdef::HvError::CpuidXsaveFeatureValidationError => { + TmkErrorType::CpuidXsaveFeatureValidationError + } + hvdef::HvError::ProcessorStartupTimeout => TmkErrorType::ProcessorStartupTimeout, + hvdef::HvError::SmxEnabled => TmkErrorType::SmxEnabled, + hvdef::HvError::InvalidLpIndex => TmkErrorType::InvalidLpIndex, + hvdef::HvError::InvalidRegisterValue => TmkErrorType::InvalidRegisterValue, + hvdef::HvError::InvalidVtlState => TmkErrorType::InvalidVtlState, + hvdef::HvError::NxNotDetected => TmkErrorType::NxNotDetected, + hvdef::HvError::InvalidDeviceId => TmkErrorType::InvalidDeviceId, + hvdef::HvError::InvalidDeviceState => TmkErrorType::InvalidDeviceState, + hvdef::HvError::PendingPageRequests => TmkErrorType::PendingPageRequests, + hvdef::HvError::PageRequestInvalid => TmkErrorType::PageRequestInvalid, + hvdef::HvError::KeyAlreadyExists => TmkErrorType::KeyAlreadyExists, + hvdef::HvError::DeviceAlreadyInDomain => TmkErrorType::DeviceAlreadyInDomain, + hvdef::HvError::InvalidCpuGroupId => TmkErrorType::InvalidCpuGroupId, + hvdef::HvError::InvalidCpuGroupState => TmkErrorType::InvalidCpuGroupState, + hvdef::HvError::OperationFailed => TmkErrorType::OperationFailed, + hvdef::HvError::NotAllowedWithNestedVirtActive => { + TmkErrorType::NotAllowedWithNestedVirtActive + } + hvdef::HvError::InsufficientRootMemory => TmkErrorType::InsufficientRootMemory, + hvdef::HvError::EventBufferAlreadyFreed => TmkErrorType::EventBufferAlreadyFreed, + hvdef::HvError::Timeout => TmkErrorType::Timeout, + hvdef::HvError::VtlAlreadyEnabled => TmkErrorType::VtlAlreadyEnabled, + hvdef::HvError::UnknownRegisterName => TmkErrorType::UnknownRegisterName, + // Add any other specific mappings here if hvdef::HvError has more variants + _ => { + log::warn!( + "Unhandled hvdef::HvError variant: {:?}. 
Mapping to TmkErrorType::OperationFailed.", + e + ); + TmkErrorType::OperationFailed // Generic fallback + } + }; + log::debug!( + "Mapped hvdef::HvError::{:?} to TmkErrorType::{:?}", + e, + tmk_error_type + ); + TmkError(tmk_error_type) + } +} diff --git a/opentmk/opentmk/src/platform/mod.rs b/opentmk/opentmk/src/platform/mod.rs new file mode 100644 index 0000000000..6f4c0a0b95 --- /dev/null +++ b/opentmk/opentmk/src/platform/mod.rs @@ -0,0 +1 @@ +pub mod hypvctx; diff --git a/opentmk/opentmk/src/tests/hv_cvm_mem_protect.rs b/opentmk/opentmk/src/tests/hv_cvm_mem_protect.rs new file mode 100644 index 0000000000..d17639bd0f --- /dev/null +++ b/opentmk/opentmk/src/tests/hv_cvm_mem_protect.rs @@ -0,0 +1,186 @@ +#![allow(warnings)] +use alloc::alloc::alloc; +use alloc::sync::Arc; +use core::alloc::GlobalAlloc; +use core::alloc::Layout; +use core::arch::asm; +use core::cell::RefCell; +use core::cell::UnsafeCell; +use core::fmt::Write; +use core::ops::Range; +use core::sync::atomic::AtomicBool; +use core::sync::atomic::AtomicI32; +use core::sync::atomic::Ordering; + +use ::alloc::boxed::Box; +use ::alloc::vec::Vec; +use context::VpExecutor; +use hvdef::hypercall::HvInputVtl; +use hvdef::HvAllArchRegisterName; +use hvdef::HvRegisterVsmVpStatus; +use hvdef::HvX64RegisterName; +use hvdef::Vtl; +use hypvctx::HvTestCtx; +use sync_nostd::Channel; +use sync_nostd::Receiver; +use sync_nostd::Sender; +use uefi::entry; +use uefi::Status; + +use crate::context; +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VtlPlatformTrait; +use crate::platform::hypvctx; +use crate::tmk_assert; +use crate::tmk_logger; +use crate::tmkdefs::TmkResult; + +static mut HEAPX: RefCell<*mut u8> = RefCell::new(0 as *mut u8); +static mut RETURN_VALUE: u8 = 0; +#[inline(never)] +fn violate_heap() { + unsafe { + let heapx = *HEAPX.borrow(); + RETURN_VALUE = *(heapx.add(10)); + } +} + 
+#[inline(never)] +fn backup_and_restore() { + use core::arch::asm; + unsafe { + asm!(" + push rax + push rbx + push rcx + push rdx + push rsi + push rdi + push rbp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + call {} + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rbp + pop rdi + pop rsi + pop rdx + pop rcx + pop rbx + pop rax + ", sym violate_heap); + } +} + +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + VtlPlatformTrait + VirtualProcessorPlatformTrait, +{ + log::info!("ctx ptr: {:p}", &ctx as *const _); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 8"); + + ctx.setup_interrupt_handler(); + log::info!("successfully setup interrupt handler"); + + ctx.setup_partition_vtl(Vtl::Vtl1); + log::info!("successfully setup partition vtl1"); + + ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + + let layout = + Layout::from_size_align(1024 * 1024, 4096).expect("msg: failed to create layout"); + let ptr = unsafe { alloc(layout) }; + + log::info!("allocated some memory in the heap from vtl1"); + + unsafe { + let mut z = HEAPX.borrow_mut(); + *z = ptr; + *ptr.add(10) = 0xA2; + } + + let size = layout.size(); + + ctx.setup_vtl_protection(); + + log::info!("enabled vtl protections for the partition."); + + let range = Range { + start: ptr as u64, + end: ptr as u64 + size as u64, + }; + + let result = ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); + tmk_assert!( + result.is_ok(), + "apply_vtl_protection_for_memory should succeed" + ); + + log::info!("moving to vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + })); + + log::info!("BACK to vtl0"); + ctx.set_interrupt_idx(18, || { + tmk_assert!(true, "we reached to MC handler"); + 
panic!("MC causes the test to end"); + }); + + let (tx, rx) = Channel::new().split(); + + ctx.start_on_vp(VpExecutor::new(0x2, Vtl::Vtl1).command(move |ctx: &mut T| { + ctx.setup_interrupt_handler(); + log::info!("successfully started running VTL1 on vp2."); + })); + + ctx.start_on_vp( + VpExecutor::new(0x2, Vtl::Vtl0).command(move |ctx: &mut T| unsafe { + log::info!("successfully started running VTL0 on vp2."); + unsafe { + let heapx = *HEAPX.borrow(); + + let read_protected_memory = || *(heapx.add(10)); + + let read_result = read_protected_memory(); + log::info!( + "reading mutated heap memory from vtl0(it should not be 0xA2): 0x{:x}", + read_result + ); + tmk_assert!( + read_result != 0xA2, + "heap memory should not be accessible from vtl0" + ); + } + + tx.send(()); + }), + ); + + rx.recv(); + + tmk_assert!( + false, + "we should not reach here injecting MC should terminate the test" + ); +} diff --git a/opentmk/opentmk/src/tests/hv_error_vp_start.rs b/opentmk/opentmk/src/tests/hv_error_vp_start.rs new file mode 100644 index 0000000000..fe98b49bfd --- /dev/null +++ b/opentmk/opentmk/src/tests/hv_error_vp_start.rs @@ -0,0 +1,51 @@ +use hvdef::Vtl; +use sync_nostd::Channel; + +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecutor; +use crate::context::VtlPlatformTrait; +use crate::tmk_assert; + +pub fn exec(ctx: &mut T) +where + T: VtlPlatformTrait + VirtualProcessorPlatformTrait, +{ + // Skiping VTL setup for now to test the negitive case + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + + // Testing BSP VTL1 Bringup + { + let (tx, _rx) = Channel::new().split(); + + let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + 
tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + tx.send(()) + .expect("Failed to send message through the channel"); + ctx.switch_to_low_vtl(); + })); + + tmk_assert!(result.is_err(), "start_on_vp should fail"); + tmk_assert!( + result.unwrap_err() == crate::tmkdefs::TmkErrorType::InvalidVtlState.into(), + "start_on_vp should fail with InvalidVtlState" + ); + log::info!("result on start_on_vp: {:?}", result); + } +} diff --git a/opentmk/opentmk/src/tests/hv_misc.rs b/opentmk/opentmk/src/tests/hv_misc.rs new file mode 100644 index 0000000000..bea9f22ad7 --- /dev/null +++ b/opentmk/opentmk/src/tests/hv_misc.rs @@ -0,0 +1,266 @@ +#![allow(warnings)] +use alloc::alloc::alloc; +use alloc::string::String; +use alloc::sync::Arc; +use core::alloc::GlobalAlloc; +use core::alloc::Layout; +use core::arch::asm; +use core::cell::RefCell; +use core::cell::UnsafeCell; +use core::fmt::Write; +use core::ops::Range; +use core::sync::atomic::AtomicBool; +use core::sync::atomic::AtomicI32; +use core::sync::atomic::Ordering; + +use ::alloc::boxed::Box; +use ::alloc::vec::Vec; +use context::VpExecutor; +use hvdef::hypercall::HvInputVtl; +use hvdef::HvAllArchRegisterName; +use hvdef::HvRegisterVsmVpStatus; +use hvdef::HvX64RegisterName; +use hvdef::Vtl; +use hypvctx::HvTestCtx; +use iced_x86::DecoderOptions; +use iced_x86::Formatter; +use iced_x86::NasmFormatter; +use sync_nostd::Channel; +use sync_nostd::Receiver; +use sync_nostd::Sender; +use uefi::entry; +use uefi::Status; + +use crate::context; +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VtlPlatformTrait; +use crate::platform::hypvctx; +use crate::tmk_assert; +use crate::tmk_logger; +use 
crate::tmkdefs::TmkResult; + +static mut HEAPX: RefCell<*mut u8> = RefCell::new(0 as *mut u8); + +static mut RETURN_VALUE: u8 = 0; +#[inline(never)] +fn violate_heap() { + unsafe { + let heapx = *HEAPX.borrow(); + RETURN_VALUE = *(heapx.add(10)); + } +} + +#[inline(never)] +fn backup_and_restore() { + unsafe { + asm!(" + push rax + push rbx + push rcx + push rdx + push rsi + push rdi + push rbp + push r8 + push r9 + push r10 + push r11 + push r12 + push r13 + push r14 + push r15 + call {} + pop r15 + pop r14 + pop r13 + pop r12 + pop r11 + pop r10 + pop r9 + pop r8 + pop rbp + pop rdi + pop rsi + pop rdx + pop rcx + pop rbx + pop rax + ", sym violate_heap); + } +} + +pub fn read_assembly_output(target: u64) -> usize { + unsafe { + let target_ptr = target as *const u8; + let code_bytes = core::slice::from_raw_parts(target_ptr, 0x100); + let mut decoder = iced_x86::Decoder::with_ip(64, code_bytes, target, DecoderOptions::NONE); + + let mut formatter = NasmFormatter::new(); + let mut output = String::new(); + let mut first_ip_len = 0; + let mut set = false; + while decoder.can_decode() { + let instr = decoder.decode(); + if !set { + first_ip_len = instr.len(); + set = true; + } + formatter.format(&instr, &mut output); + log::info!("{}:{}", instr.ip(), output); + output.clear(); + } + + first_ip_len + } +} + +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + log::info!("ctx ptr: {:p}", &ctx as *const _); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 8"); + + ctx.setup_interrupt_handler(); + + log::info!("set intercept handler successfully!"); + + ctx.setup_partition_vtl(Vtl::Vtl1); + + ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + 
ctx.setup_secure_intercept(0x30); + ctx.set_interrupt_idx(0x30, move || { + log::info!("interrupt fired!"); + let mut hv = HvTestCtx::new(); + // expected to get interrupt in VTL1. + // CVMs dont support hypercalls to get the current VTL from VTL1/0. + hv.init(Vtl::Vtl1); + log::info!( + "current vp from interrupt: {}", + hv.get_current_vp().unwrap() + ); + + let rip = hvdef::HvX64RegisterName::Rip.0; + + let reg = hv.get_vp_state_with_vtl(rip, Vtl::Vtl0); + tmk_assert!(reg.is_ok(), "get_vp_state_with_vtl should succeed"); + + let reg = reg.unwrap(); + log::info!("rip from vtl0: 0x{:x}", reg); + + log::info!("pring assembly for the current RIP:"); + let size = read_assembly_output(reg); + + let new_rip_value = reg + size as u64; + + log::info!("pring assembly for the updated RIP:"); + read_assembly_output(new_rip_value); + + let r = hv.set_vp_state_with_vtl(HvX64RegisterName::Rip.0, new_rip_value, Vtl::Vtl0); + tmk_assert!(r.is_ok(), "set_vp_state_with_vtl should succeed"); + + let reg = hv.get_vp_state_with_vtl(rip, Vtl::Vtl0); + tmk_assert!(reg.is_ok(), "get_vp_state_with_vtl should succeed"); + + let reg = reg.unwrap(); + log::info!("rip from vtl0 after modification: 0x{:x}", reg); + tmk_assert!(reg == new_rip_value, "rip should be modified"); + + log::info!("pring assembly for the updated RIP after fetch:"); + read_assembly_output(reg); + + log::info!("interrupt handled!"); + hv.print_rbp(); + }); + + let layout = + Layout::from_size_align(1024 * 1024, 4096).expect("msg: failed to create layout"); + let ptr = unsafe { alloc(layout) }; + log::info!("allocated some memory in the heap from vtl1"); + unsafe { + let mut z = HEAPX.borrow_mut(); + *z = ptr; + *ptr.add(10) = 0xA2; + } + + let size = layout.size(); + ctx.setup_vtl_protection(); + + log::info!("enabled vtl protections for the partition."); + + let range = Range { + start: ptr as u64, + end: ptr as u64 + size as u64, + }; + + ctx.apply_vtl_protection_for_memory(range, Vtl::Vtl1); + + log::info!("moving to 
vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + })); + + log::info!("ctx ptr: {:p}", &ctx as *const _); + + let mut l = 0u64; + unsafe { asm!("mov {}, rsp", out(reg) l) }; + log::info!("rsp: 0x{:x}", l); + + let (tx, rx) = Channel::new().split(); + + ctx.start_on_vp(VpExecutor::new(0x2, Vtl::Vtl1).command(move |ctx: &mut T| { + ctx.setup_interrupt_handler(); + ctx.setup_secure_intercept(0x30); + + log::info!("successfully started running VTL1 on vp2."); + })); + + ctx.start_on_vp( + VpExecutor::new(0x2, Vtl::Vtl0).command(move |ctx: &mut T| unsafe { + log::info!("successfully started running VTL0 on vp2."); + + ctx.queue_command_vp(VpExecutor::new(2, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("after intercept successfully started running VTL1 on vp2."); + ctx.switch_to_low_vtl(); + })); + + backup_and_restore(); + log::info!( + "reading mutated heap memory from vtl0(it should not be 0xA2): 0x{:x}", + RETURN_VALUE + ); + tmk_assert!( + RETURN_VALUE != 0xA2, + "heap memory should not be accessible from vtl0" + ); + tx.send(()); + }), + ); + + rx.recv(); + // let (mut tx, mut rx) = Channel::new(1); + // { + // let mut tx = tx.clone(); + // ctx.start_on_vp(VpExecutor::new(2, Vtl::Vtl0).command( + // move |ctx: &mut dyn TestCtxTrait| { + // log::info!("Hello form vtl0 on vp2!"); + // tx.send(()); + // }, + // )); + // } + + // rx.recv(); + + log::info!("we are in vtl0 now!"); + log::info!("we reached the end of the test"); +} diff --git a/opentmk/opentmk/src/tests/hv_processor.rs b/opentmk/opentmk/src/tests/hv_processor.rs new file mode 100644 index 0000000000..3ce93018c6 --- /dev/null +++ b/opentmk/opentmk/src/tests/hv_processor.rs @@ -0,0 +1,104 @@ +use hvdef::Vtl; +use sync_nostd::Channel; + +use crate::context::InterruptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecutor; +use crate::context::VtlPlatformTrait; +use crate::tmk_assert; + +#[inline(never)] +pub fn exec(ctx: &mut T) 
+where + T: VtlPlatformTrait + VirtualProcessorPlatformTrait + InterruptPlatformTrait, +{ + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 4"); + + _ = ctx.setup_interrupt_handler(); + + _ = ctx.set_interrupt_idx(0x6, || loop {}); + + // Testing BSP VTL Bringup + { + let (tx, rx) = Channel::new().split(); + let result = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == 0, "vp should be equal to 0"); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, "vtl should be Vtl1 for BSP"); + tx.send(()) + .expect("Failed to send message through the channel"); + ctx.switch_to_low_vtl(); + })); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); + _ = rx.recv(); + } + + for i in 1..vp_count { + // Testing VTL1 + { + let (tx, rx) = Channel::new().split(); + let result = + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl1).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl1, format!("vtl should be Vtl1 for VP {}", i)); + _ = tx.send(()); + })); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); + _ = rx.recv(); + } + + // Testing VTL0 + { + let (tx, rx) = 
Channel::new().split(); + let result = + ctx.start_on_vp(VpExecutor::new(i, Vtl::Vtl0).command(move |ctx: &mut T| { + let vp = ctx.get_current_vp(); + tmk_assert!(vp.is_ok(), "vp should be valid"); + + let vp = vp.unwrap(); + log::info!("vp: {}", vp); + tmk_assert!(vp == i, format!("vp should be equal to {}", i)); + + let vtl = ctx.get_current_vtl(); + tmk_assert!(vtl.is_ok(), "vtl should be valid"); + + let vtl = vtl.unwrap(); + log::info!("vtl: {:?}", vtl); + tmk_assert!(vtl == Vtl::Vtl0, format!("vtl should be Vtl0 for VP {}", i)); + _ = tx.send(()); + })); + tmk_assert!(result.is_ok(), "start_on_vp should succeed"); + _ = rx.recv(); + } + } + + log::warn!("All VPs have been tested"); +} diff --git a/opentmk/opentmk/src/tests/hv_tpm.rs b/opentmk/opentmk/src/tests/hv_tpm.rs new file mode 100644 index 0000000000..a866cf41fa --- /dev/null +++ b/opentmk/opentmk/src/tests/hv_tpm.rs @@ -0,0 +1,180 @@ +use alloc::string::String; +use core::ops::Range; + +use hvdef::HvX64RegisterName; +use hvdef::Vtl; +use iced_x86::DecoderOptions; +use iced_x86::Formatter; +use iced_x86::NasmFormatter; + +use crate::arch::tpm::Tpm; +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecutor; +use crate::context::VtlPlatformTrait; +use crate::devices::tpm::TpmUtil; +use crate::platform::hypvctx::HvTestCtx; +use crate::tmk_assert; + +pub fn read_assembly_output(target: u64) -> usize { + unsafe { + let target_ptr = target as *const u8; + let code_bytes = core::slice::from_raw_parts(target_ptr, 0x100); + let mut decoder = iced_x86::Decoder::with_ip(64, code_bytes, target, DecoderOptions::NONE); + + let mut formatter = NasmFormatter::new(); + let mut output = String::new(); + let mut first_ip_len = 0; + let mut set = false; + while decoder.can_decode() { + let instr = decoder.decode(); + if !set { + first_ip_len = instr.len(); + set = true; + } + formatter.format(&instr, 
&mut output); + log::info!("{}:{}", instr.ip(), output); + output.clear(); + } + + first_ip_len + } +} + +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + let mut _tpm = Tpm::new(); + let protocol_version = Tpm::get_tcg_protocol_version(); + log::warn!("TPM protocol version: 0x{:x}", protocol_version); + // SAFETY: asuming that memory range is limited to 4GB (addressable by 32-bit) + // let tpm_layout = Layout::from_size_align(4096 * 2, 4096); + // tmk_assert!(tpm_layout.is_ok(), "TPM layout is allocated as expected"); + // let tpm_layout = tpm_layout.unwrap(); + // let tpm_ptr = unsafe { alloc(tpm_layout) }; + + // let tpm_gpa = tpm_ptr as u64; + // tmk_assert!( + // tpm_gpa >> 32 == 0, + // "TPM layout is allocated in the first 4GB" + // ); + + // let tpm_gpa = tpm_gpa as u32; + + // let set_tpm_gpa = Tpm::map_shared_memory(tpm_gpa); + // tmk_assert!( + // set_tpm_gpa == tpm_gpa, + // format!( + // "TPM layout is mapped as expected, tpm_gpa: 0x{:x}, set_tpm_gpa: 0x{:x}", + // tpm_gpa, set_tpm_gpa + // ) + // ); + + let tpm_gpa = Tpm::get_mapped_shared_memory(); + log::warn!("TPM CMD buffer from vTPM Device: 0x{:x}", tpm_gpa); + let tpm_ptr = (tpm_gpa as u64) as *mut u8; + + // build slice from pointer + let tpm_command = unsafe { core::slice::from_raw_parts_mut(tpm_ptr, 4096) }; + let tpm_response = unsafe { core::slice::from_raw_parts_mut(tpm_ptr.add(4096), 4096) }; + + _tpm.set_command_buffer(tpm_command); + _tpm.set_response_buffer(tpm_response); + + let result = _tpm.self_test(); + + log::warn!("TPM self test result: {:?}", result); + tmk_assert!(result.is_ok(), "TPM self test is successful"); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 8"); + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), 
"setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let response_rage = Range { + start: tpm_gpa as u64 + 4096, + end: tpm_gpa as u64 + 4096 * 2, + }; + + let _r = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.set_interrupt_idx(0x30, move || { + log::info!("interrupt fired!"); + let mut hv = HvTestCtx::new(); + // expected to get interrupt in VTL1. + // CVMs dont support hypercalls to get the current VTL from VTL1/0. + _ = hv.init(Vtl::Vtl1); + log::info!( + "current vp from interrupt: {}", + hv.get_current_vp().unwrap() + ); + + let rip = HvX64RegisterName::Rip.0; + + let reg = hv.get_vp_state_with_vtl(rip, Vtl::Vtl0); + tmk_assert!(reg.is_ok(), "get_vp_state_with_vtl should succeed"); + + let reg = reg.unwrap(); + log::info!("rip from vtl0: 0x{:x}", reg); + + log::info!("pring assembly for the current RIP:"); + let size = read_assembly_output(reg); + + let new_rip_value = reg + size as u64; + + log::info!("pring assembly for the updated RIP:"); + read_assembly_output(new_rip_value); + + let r = hv.set_vp_state_with_vtl(HvX64RegisterName::Rip.0, new_rip_value, Vtl::Vtl0); + tmk_assert!(r.is_ok(), "set_vp_state_with_vtl should succeed"); + + let reg = hv.get_vp_state_with_vtl(rip, Vtl::Vtl0); + tmk_assert!(reg.is_ok(), "get_vp_state_with_vtl should succeed"); + + let reg = reg.unwrap(); + log::info!("rip from vtl0 after modification: 0x{:x}", reg); + tmk_assert!(reg == new_rip_value, "rip should be modified"); + + log::info!("pring assembly for the updated RIP after fetch:"); + read_assembly_output(reg); + + log::info!("interrupt handled!"); + hv.print_rbp(); + }); + tmk_assert!(r.is_ok(), 
"set_interrupt_idx should succeed"); + + let r = ctx.setup_vtl_protection(); + tmk_assert!(r.is_ok(), "setup_vtl_protection should succeed"); + + log::info!("enabled vtl protections for the partition."); + + let r = ctx.apply_vtl_protection_for_memory(response_rage, Vtl::Vtl1); + tmk_assert!(r.is_ok(), "apply_vtl_protection_for_memory should succeed"); + + log::info!("moving to vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + })); + + let cmd = TpmUtil::get_self_test_cmd(); + _tpm.copy_to_command_buffer(&cmd); + log::warn!("TPM self test command copied to buffer"); + log::warn!("about to execute TPM self test command.."); + Tpm::execute_command(); + log::warn!("TPM self test command executed"); + + loop {} +} diff --git a/opentmk/opentmk/src/tests/hv_tpm_read_cvm.rs b/opentmk/opentmk/src/tests/hv_tpm_read_cvm.rs new file mode 100644 index 0000000000..f8c27baff3 --- /dev/null +++ b/opentmk/opentmk/src/tests/hv_tpm_read_cvm.rs @@ -0,0 +1,88 @@ +use core::ops::Range; + +use hvdef::Vtl; + +use crate::arch::tpm::Tpm; +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecutor; +use crate::context::VtlPlatformTrait; +use crate::devices::tpm::TpmUtil; +use crate::tmk_assert; + +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + let mut _tpm = Tpm::new(); + let protocol_version = Tpm::get_tcg_protocol_version(); + log::warn!("TPM protocol version: 0x{:x}", protocol_version); + + let tpm_gpa = Tpm::get_mapped_shared_memory(); + log::warn!("TPM CMD buffer from vTPM Device: 0x{:x}", tpm_gpa); + let tpm_ptr = (tpm_gpa as u64) as *mut u8; + + // build slice from pointer + let tpm_command = unsafe { core::slice::from_raw_parts_mut(tpm_ptr, 4096) }; + let tpm_response = unsafe { core::slice::from_raw_parts_mut(tpm_ptr.add(4096), 4096) 
}; + + _tpm.set_command_buffer(tpm_command); + _tpm.set_response_buffer(tpm_response); + + let result = _tpm.self_test(); + + log::warn!("TPM self test result: {:?}", result); + tmk_assert!(result.is_ok(), "TPM self test is successful"); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 8"); + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), "setup_partition_vtl should succeed"); + + let command_range = Range { + start: tpm_gpa as u64, + end: tpm_gpa as u64 + 4096, + }; + + let _r = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.setup_vtl_protection(); + tmk_assert!(r.is_ok(), "setup_vtl_protection should succeed"); + log::info!("enabled vtl protections for the partition."); + ctx.switch_to_low_vtl(); + })); + + let r = ctx.set_interrupt_idx(18, || { + log::warn!("successfully intercepted interrupt 18"); + panic!("MC should cause a system abort"); + }); + tmk_assert!(r.is_ok(), "set_interrupt_idx should succeed"); + + let cmd = TpmUtil::get_self_test_cmd(); + _tpm.copy_to_command_buffer(&cmd); + log::warn!("TPM self test command copied to buffer"); + + let r = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + let r = ctx.apply_vtl_protection_for_memory(command_range, Vtl::Vtl1); + tmk_assert!(r.is_ok(), "apply_vtl_protection_for_memory should succeed"); + + ctx.switch_to_low_vtl(); + })); + tmk_assert!(r.is_ok(), "start_on_vp should succeed"); + + log::warn!("about to execute TPM self test command.."); + 
Tpm::execute_command(); + log::warn!("TPM self test command executed"); +} diff --git a/opentmk/opentmk/src/tests/hv_tpm_write_cvm.rs b/opentmk/opentmk/src/tests/hv_tpm_write_cvm.rs new file mode 100644 index 0000000000..c659c2c0b8 --- /dev/null +++ b/opentmk/opentmk/src/tests/hv_tpm_write_cvm.rs @@ -0,0 +1,86 @@ +use core::ops::Range; + +use hvdef::Vtl; + +use crate::arch::tpm::Tpm; +use crate::context::InterruptPlatformTrait; +use crate::context::SecureInterceptPlatformTrait; +use crate::context::VirtualProcessorPlatformTrait; +use crate::context::VpExecutor; +use crate::context::VtlPlatformTrait; +use crate::devices::tpm::TpmUtil; +use crate::tmk_assert; + +pub fn exec(ctx: &mut T) +where + T: InterruptPlatformTrait + + SecureInterceptPlatformTrait + + VtlPlatformTrait + + VirtualProcessorPlatformTrait, +{ + let mut _tpm = Tpm::new(); + let protocol_version = Tpm::get_tcg_protocol_version(); + log::warn!("TPM protocol version: 0x{:x}", protocol_version); + + let tpm_gpa = Tpm::get_mapped_shared_memory(); + log::warn!("TPM CMD buffer from vTPM Device: 0x{:x}", tpm_gpa); + let tpm_ptr = (tpm_gpa as u64) as *mut u8; + + // build slice from pointer + let tpm_command = unsafe { core::slice::from_raw_parts_mut(tpm_ptr, 4096) }; + let tpm_response = unsafe { core::slice::from_raw_parts_mut(tpm_ptr.add(4096), 4096) }; + + _tpm.set_command_buffer(tpm_command); + _tpm.set_response_buffer(tpm_response); + + let result = _tpm.self_test(); + + log::warn!("TPM self test result: {:?}", result); + tmk_assert!(result.is_ok(), "TPM self test is successful"); + + let vp_count = ctx.get_vp_count(); + tmk_assert!(vp_count.is_ok(), "get_vp_count should succeed"); + let vp_count = vp_count.unwrap(); + tmk_assert!(vp_count == 4, "vp count should be 8"); + let r = ctx.setup_interrupt_handler(); + tmk_assert!(r.is_ok(), "setup_interrupt_handler should succeed"); + log::info!("set intercept handler successfully!"); + let r = ctx.setup_partition_vtl(Vtl::Vtl1); + tmk_assert!(r.is_ok(), 
"setup_partition_vtl should succeed"); + + let response_rage = Range { + start: tpm_gpa as u64 + 4096, + end: tpm_gpa as u64 + 4096 * 2, + }; + + let _r = ctx.start_on_vp(VpExecutor::new(0, Vtl::Vtl1).command(move |ctx: &mut T| { + log::info!("successfully started running VTL1 on vp0."); + let r = ctx.setup_secure_intercept(0x30); + tmk_assert!(r.is_ok(), "setup_secure_intercept should succeed"); + + let r = ctx.setup_vtl_protection(); + tmk_assert!(r.is_ok(), "setup_vtl_protection should succeed"); + + log::info!("enabled vtl protections for the partition."); + + let r = ctx.apply_vtl_protection_for_memory(response_rage, Vtl::Vtl1); + tmk_assert!(r.is_ok(), "apply_vtl_protection_for_memory should succeed"); + + log::info!("moving to vtl0 to attempt to read the heap memory"); + + ctx.switch_to_low_vtl(); + })); + + let r = ctx.set_interrupt_idx(18, || { + log::warn!("successfully intercepted interrupt 18"); + panic!("MC should cause a system abort"); + }); + tmk_assert!(r.is_ok(), "set_interrupt_idx should succeed"); + + let cmd = TpmUtil::get_self_test_cmd(); + _tpm.copy_to_command_buffer(&cmd); + log::warn!("TPM self test command copied to buffer"); + log::warn!("about to execute TPM self test command.."); + Tpm::execute_command(); + log::warn!("TPM self test command executed"); +} diff --git a/opentmk/opentmk/src/tests/mod.rs b/opentmk/opentmk/src/tests/mod.rs new file mode 100644 index 0000000000..b4b70472c8 --- /dev/null +++ b/opentmk/opentmk/src/tests/mod.rs @@ -0,0 +1,16 @@ +#![expect(dead_code)] +use crate::platform::hypvctx::HvTestCtx; + +mod hv_cvm_mem_protect; +mod hv_error_vp_start; +mod hv_misc; +mod hv_processor; +mod hv_tpm; +mod hv_tpm_read_cvm; +mod hv_tpm_write_cvm; + +pub fn run_test() { + let mut ctx = HvTestCtx::new(); + ctx.init(hvdef::Vtl::Vtl0).expect("failed to init on BSP"); + hv_tpm_read_cvm::exec(&mut ctx); +} diff --git a/opentmk/opentmk/src/tmk_assert.rs b/opentmk/opentmk/src/tmk_assert.rs new file mode 100644 index 
0000000000..04cf9544f6 --- /dev/null +++ b/opentmk/opentmk/src/tmk_assert.rs @@ -0,0 +1,84 @@ +use alloc::string::String; +use alloc::string::ToString; +use core::fmt::Write; + +use serde::Serialize; + +#[derive(Serialize)] +struct AssertJson<'a, T> +where + T: Serialize, +{ + #[serde(rename = "type")] + type_: &'a str, + level: &'a str, + message: &'a str, + line: String, + assertion_result: bool, + testname: &'a T, +} + +impl<'a, T> AssertJson<'a, T> +where + T: Serialize, +{ + fn new( + type_: &'a str, + level: &'a str, + message: &'a str, + line: String, + assertion_result: bool, + testname: &'a T, + ) -> Self { + Self { + type_, + level, + message, + line, + assertion_result, + testname, + } + } +} + +pub(crate) fn format_assert_json_string( + s: &str, + terminate_new_line: bool, + line: String, + assert_result: bool, + testname: &T, +) -> String +where + T: Serialize, +{ + let assert_json = AssertJson::new("assert", "WARN", s, line, assert_result, testname); + + let out = serde_json::to_string(&assert_json).expect("Failed to serialize assert JSON"); + let mut out = out.to_string(); + if terminate_new_line { + out.push('\n'); + } + out +} + +pub(crate) fn write_str(s: &str) { + _ = crate::tmk_logger::LOGGER.get_writter().write_str(s); +} + +#[macro_export] +macro_rules! 
tmk_assert { + ($condition:expr, $message:expr) => {{ + let file = core::file!(); + let line = line!(); + let file_line = format!("{}:{}", file, line); + let expn = stringify!($condition); + let result: bool = $condition; + let js = $crate::tmk_assert::format_assert_json_string( + &expn, true, file_line, result, &$message, + ); + $crate::tmk_assert::write_str(&js); + if !result { + panic!("Assertion failed: {}", $message); + } + }}; +} diff --git a/opentmk/opentmk/src/tmk_logger.rs b/opentmk/opentmk/src/tmk_logger.rs new file mode 100644 index 0000000000..e9c662fc8f --- /dev/null +++ b/opentmk/opentmk/src/tmk_logger.rs @@ -0,0 +1,101 @@ +use alloc::fmt::format; +use alloc::string::String; +use alloc::string::ToString; +use core::fmt::Write; + +use anyhow::Result; +use log::SetLoggerError; +use serde::Serialize; +use sync_nostd::Mutex; +use sync_nostd::MutexGuard; + +use crate::arch::serial::InstrIoAccess; +use crate::arch::serial::Serial; +use crate::arch::serial::SerialPort; + +#[derive(Serialize)] +struct LogEntry { + #[serde(rename = "type")] + log_type: &'static str, + level: String, + message: String, + line: String, +} + +impl LogEntry { + fn new(level: log::Level, message: &String, line: &String) -> Self { + LogEntry { + log_type: "log", + level: level.as_str().to_string(), + message: message.clone(), + line: line.clone(), + } + } +} + +pub(crate) fn format_log_string_to_json( + message: &String, + line: &String, + terminate_new_line: bool, + level: log::Level, +) -> String { + let log_entry = LogEntry::new(level, message, line); + let out = serde_json::to_string(&log_entry).unwrap(); + let mut out = out.to_string(); + if terminate_new_line { + out.push('\n'); + } + out +} + +pub struct TmkLogger { + pub writter: T, +} + +impl TmkLogger> +where + T: Write + Send, +{ + pub const fn new(provider: T) -> Self { + TmkLogger { + writter: Mutex::new(provider), + } + } + + pub fn get_writter(&self) -> MutexGuard<'_, T> + where + T: Write + Send, + { + 
self.writter.lock() + } +} + +impl log::Log for TmkLogger> +where + T: Write + Send, +{ + fn enabled(&self, _metadata: &log::Metadata<'_>) -> bool { + true + } + + fn log(&self, record: &log::Record<'_>) { + let str = format(*record.args()); + let line = format!( + "{}:{}", + record.file().unwrap_or_default(), + record.line().unwrap_or_default() + ); + let str = format_log_string_to_json(&str, &line, true, record.level()); + _ = self.writter.lock().write_str(str.as_str()); + } + + fn flush(&self) {} +} + +type SerialPortWriter = Serial; +pub static LOGGER: TmkLogger> = + TmkLogger::new(SerialPortWriter::new(SerialPort::COM2, InstrIoAccess)); + +pub fn init() -> Result<(), SetLoggerError> { + log::set_logger(&LOGGER).map(|()| log::set_max_level(log::LevelFilter::Debug)) +} diff --git a/opentmk/opentmk/src/tmkdefs.rs b/opentmk/opentmk/src/tmkdefs.rs new file mode 100644 index 0000000000..4bf42b1510 --- /dev/null +++ b/opentmk/opentmk/src/tmkdefs.rs @@ -0,0 +1,93 @@ +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum TmkErrorType { + AllocationFailed, + InvalidParameter, + EnableVtlFailed, + SetDefaultCtxFailed, + StartVpFailed, + QueueCommandFailed, + SetupVtlProtectionFailed, + SetupPartitionVtlFailed, + SetupInterruptHandlerFailed, + SetInterruptIdxFailed, + SetupSecureInterceptFailed, + ApplyVtlProtectionForMemoryFailed, + ReadMsrFailed, + WriteMsrFailed, + GetRegisterFailed, + InvalidHypercallCode, + InvalidHypercallInput, + InvalidAlignment, + AccessDenied, + InvalidPartitionState, + OperationDenied, + UnknownProperty, + PropertyValueOutOfRange, + InsufficientMemory, + PartitionTooDeep, + InvalidPartitionId, + InvalidVpIndex, + NotFound, + InvalidPortId, + InvalidConnectionId, + InsufficientBuffers, + NotAcknowledged, + InvalidVpState, + Acknowledged, + InvalidSaveRestoreState, + InvalidSynicState, + ObjectInUse, + InvalidProximityDomainInfo, + NoData, + Inactive, + NoResources, + FeatureUnavailable, + PartialPacket, + ProcessorFeatureNotSupported, + 
ProcessorCacheLineFlushSizeIncompatible, + InsufficientBuffer, + IncompatibleProcessor, + InsufficientDeviceDomains, + CpuidFeatureValidationError, + CpuidXsaveFeatureValidationError, + ProcessorStartupTimeout, + SmxEnabled, + InvalidLpIndex, + InvalidRegisterValue, + InvalidVtlState, + NxNotDetected, + InvalidDeviceId, + InvalidDeviceState, + PendingPageRequests, + PageRequestInvalid, + KeyAlreadyExists, + DeviceAlreadyInDomain, + InvalidCpuGroupId, + InvalidCpuGroupState, + OperationFailed, + NotAllowedWithNestedVirtActive, + InsufficientRootMemory, + EventBufferAlreadyFreed, + Timeout, + VtlAlreadyEnabled, + UnknownRegisterName, +} + +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub struct TmkError(pub TmkErrorType); + +pub type TmkResult = Result; + +impl core::error::Error for TmkError {} + +impl core::fmt::Display for TmkError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "TmkError({:?})", self.0) + } +} + +impl From for TmkError { + fn from(e: TmkErrorType) -> Self { + TmkError(e) + } +} diff --git a/opentmk/opentmk/src/uefi/alloc.rs b/opentmk/opentmk/src/uefi/alloc.rs new file mode 100644 index 0000000000..356b390278 --- /dev/null +++ b/opentmk/opentmk/src/uefi/alloc.rs @@ -0,0 +1,101 @@ +use core::alloc::GlobalAlloc; +use core::cell::RefCell; + +use linked_list_allocator::LockedHeap; +use sync_nostd::Mutex; +use uefi::allocator::Allocator; +use uefi::boot::AllocateType; +use uefi::boot::MemoryType; +use uefi::boot::{self}; + +pub const SIZE_1MB: usize = 1024 * 1024; +const PAGE_SIZE: usize = 4096; + +#[global_allocator] +pub static ALLOCATOR: MemoryAllocator = MemoryAllocator { + use_locked_heap: Mutex::new(RefCell::new(false)), + locked_heap: LockedHeap::empty(), + uefi_allocator: Allocator {}, +}; + +pub struct MemoryAllocator { + use_locked_heap: Mutex>, + locked_heap: LockedHeap, + uefi_allocator: Allocator, +} + +#[expect(unsafe_code)] +unsafe impl GlobalAlloc for MemoryAllocator { + #[allow(unsafe_code)] + 
unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.alloc(layout) } + } else { + unsafe { self.uefi_allocator.alloc(layout) } + } + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.dealloc(ptr, layout) } + } else { + unsafe { self.uefi_allocator.dealloc(ptr, layout) } + } + } + + unsafe fn alloc_zeroed(&self, layout: core::alloc::Layout) -> *mut u8 { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.alloc_zeroed(layout) } + } else { + unsafe { self.uefi_allocator.alloc_zeroed(layout) } + } + } + + unsafe fn realloc( + &self, + ptr: *mut u8, + layout: core::alloc::Layout, + new_size: usize, + ) -> *mut u8 { + if *self.use_locked_heap.lock().borrow() { + unsafe { self.locked_heap.realloc(ptr, layout, new_size) } + } else { + unsafe { self.uefi_allocator.realloc(ptr, layout, new_size) } + } + } +} + +impl MemoryAllocator { + pub fn init(&self, size: usize) -> bool { + let pages = ((SIZE_1MB * size) / 4096) + 1; + let size = pages * 4096; + let mem: Result<core::ptr::NonNull<u8>, uefi::Error> = boot::allocate_pages( + AllocateType::AnyPages, + MemoryType::BOOT_SERVICES_DATA, + pages, + ); + if mem.is_err() { + return false; + } + let ptr = mem.unwrap().as_ptr(); + unsafe { + self.locked_heap.lock().init(ptr, size); + } + *self.use_locked_heap.lock().borrow_mut() = true; + true + } + + #[allow(dead_code)] + pub fn get_page_alligned_memory(&self, size: usize) -> *mut u8 { + let pages = ((SIZE_1MB * size) / PAGE_SIZE) + 1; + let mem: Result<core::ptr::NonNull<u8>, uefi::Error> = boot::allocate_pages( + AllocateType::AnyPages, + MemoryType::BOOT_SERVICES_DATA, + pages, + ); + if mem.is_err() { + return core::ptr::null_mut(); + } + mem.unwrap().as_ptr() + } +} diff --git a/opentmk/opentmk/src/uefi/init.rs b/opentmk/opentmk/src/uefi/init.rs new file mode 100644 index 0000000000..292dacd159 --- /dev/null +++ 
b/opentmk/opentmk/src/uefi/init.rs @@ -0,0 +1,60 @@ +use uefi::boot::exit_boot_services; +use uefi::boot::MemoryType; +use uefi::guid; +use uefi::CStr16; +use uefi::Status; + +use super::alloc::ALLOCATOR; + +const EFI_GUID: uefi::Guid = guid!("610b9e98-c6f6-47f8-8b47-2d2da0d52a91"); +const OS_LOADER_INDICATIONS: &str = "OsLoaderIndications"; + +fn enable_uefi_vtl_protection() { + let mut buf = vec![0u8; 1024]; + let mut str_buff = vec![0u16; 1024]; + let os_loader_indications_key = + CStr16::from_str_with_buf(OS_LOADER_INDICATIONS, str_buff.as_mut_slice()).unwrap(); + + let os_loader_indications_result = uefi::runtime::get_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(EFI_GUID), + buf.as_mut(), + ) + .expect("Failed to get OsLoaderIndications"); + + let mut os_loader_indications = u32::from_le_bytes( + os_loader_indications_result.0[0..4] + .try_into() + .expect("error in output"), + ); + os_loader_indications |= 0x1u32; + + let os_loader_indications = os_loader_indications.to_le_bytes(); + + uefi::runtime::set_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(EFI_GUID), + os_loader_indications_result.1, + &os_loader_indications, + ) + .expect("Failed to set OsLoaderIndications"); + + let _os_loader_indications_result = uefi::runtime::get_variable( + os_loader_indications_key, + &uefi::runtime::VariableVendor(EFI_GUID), + buf.as_mut(), + ) + .expect("Failed to get OsLoaderIndications"); + + let _memory_map = unsafe { exit_boot_services(MemoryType::BOOT_SERVICES_DATA) }; +} + +pub fn init() -> Result<(), Status> { + let r: bool = ALLOCATOR.init(2048); + if !r { + return Err(Status::ABORTED); + } + crate::tmk_logger::init().expect("Failed to init logger"); + enable_uefi_vtl_protection(); + Ok(()) +} diff --git a/opentmk/opentmk/src/uefi/mod.rs b/opentmk/opentmk/src/uefi/mod.rs new file mode 100644 index 0000000000..cbd6b96d92 --- /dev/null +++ b/opentmk/opentmk/src/uefi/mod.rs @@ -0,0 +1,23 @@ +// Copyright (c) 
Microsoft Corporation. +// Licensed under the MIT License. + +mod alloc; +pub mod init; +mod rt; + +use init::init; +use uefi::entry; +use uefi::Status; + +use crate::tmk_assert; + +#[entry] +fn uefi_main() -> Status { + let r = init(); + tmk_assert!(r.is_ok(), "init should succeed"); + + log::warn!("TEST_START"); + crate::tests::run_test(); + log::warn!("TEST_END"); + loop {} +} diff --git a/opentmk/opentmk/src/uefi/rt.rs b/opentmk/opentmk/src/uefi/rt.rs new file mode 100644 index 0000000000..d115f97a88 --- /dev/null +++ b/opentmk/opentmk/src/uefi/rt.rs @@ -0,0 +1,13 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! Runtime support for the UEFI application environment. + +#![cfg(target_os = "uefi")] + +#[panic_handler] +fn panic_handler(panic: &core::panic::PanicInfo<'_>) -> ! { + log::error!("Panic at runtime: {}", panic); + log::warn!("TEST_END"); + loop {} +} diff --git a/opentmk/sync/Cargo.toml b/opentmk/sync/Cargo.toml new file mode 100644 index 0000000000..53f9ba2ad6 --- /dev/null +++ b/opentmk/sync/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "sync_nostd" +version = "0.1.0" +rust-version.workspace = true +edition.workspace = true + +[dependencies] +spin.workspace = true + + +[lints] +workspace = true diff --git a/opentmk/sync/src/lib.rs b/opentmk/sync/src/lib.rs new file mode 100644 index 0000000000..30e6ae2777 --- /dev/null +++ b/opentmk/sync/src/lib.rs @@ -0,0 +1,333 @@ +#![no_std] +#![allow(unsafe_code)] +extern crate alloc; +use core::sync::atomic::{AtomicUsize, Ordering}; +pub use spin::{Mutex, MutexGuard}; +use alloc::{sync::Arc, vec::Vec}; +use alloc::collections::VecDeque; +use core::error::Error; +use core::fmt; + +/// An unbounded channel implementation with priority send capability. +/// This implementation works in no_std environments using spin-rs. +/// It uses a VecDeque as the underlying buffer and ensures non-blocking operations. 
+pub struct Channel<T> { + inner: Arc<ChannelInner<T>>, +} + +/// The inner data structure holding the channel state +struct ChannelInner<T> { + /// The internal buffer using a VecDeque protected by its own mutex + buffer: Mutex<VecDeque<T>>, + + /// Number of active senders + senders: AtomicUsize, + + /// Number of active receivers + receivers: AtomicUsize, +} + +// SAFETY: ChannelInner is safe to share across threads as it uses atomic operations for senders and receivers counts +unsafe impl<T: Send> Send for ChannelInner<T> {} +// SAFETY: ChannelInner is safe to used across threads as it uses atomic operations for senders and receivers counts +unsafe impl<T: Send> Sync for ChannelInner<T> {} + +/// Error type for sending operations +#[derive(Debug, Eq, PartialEq)] +pub enum SendError<T> { + /// All receivers have been dropped + Disconnected(T), +} + +impl<T> fmt::Display for SendError<T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SendError::Disconnected(_) => write!(f, "send failed because receiver is disconnected"), + } + } +} + +impl<T: fmt::Debug> Error for SendError<T> {} + +/// Error type for receiving operations +#[derive(Debug, Eq, PartialEq)] +pub enum RecvError { + /// Channel is empty + Empty, + /// All senders have been dropped + Disconnected, +} + +impl fmt::Display for RecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RecvError::Empty => write!(f, "receive failed because channel is empty"), + RecvError::Disconnected => write!(f, "receive failed because sender is disconnected"), + } + } +} + +impl Error for RecvError {} + +/// Sender half of the channel +pub struct Sender<T> { + inner: Arc<ChannelInner<T>>, +} + +/// Receiver half of the channel +pub struct Receiver<T> { + inner: Arc<ChannelInner<T>>, +} + +// implement clone for Sender +impl<T> Clone for Sender<T> { + fn clone(&self) -> Self { + self.inner.senders.fetch_add(1, Ordering::SeqCst); + Sender { + inner: self.inner.clone(), + } + } +} + +// implement clone for Receiver +impl<T> Clone for Receiver<T> { + fn clone(&self) -> Self { + 
self.inner.receivers.fetch_add(1, Ordering::SeqCst); + Receiver { + inner: self.inner.clone(), + } + } +} + +impl<T> Channel<T> { + /// Creates a new unbounded channel + pub fn new() -> Self { + let inner = Arc::new(ChannelInner { + buffer: Mutex::new(VecDeque::new()), + senders: AtomicUsize::new(1), // Start with one sender + receivers: AtomicUsize::new(1), // Start with one receiver + }); + + Self { inner } + } + + /// Splits the channel into a sender and receiver pair + pub fn split(self) -> (Sender<T>, Receiver<T>) { + let sender = Sender { + inner: self.inner.clone(), + }; + + let receiver = Receiver { + inner: self.inner, + }; + + (sender, receiver) + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl<T> Sender<T> { + /// Sends an element to the back of the queue + /// Returns Ok(()) if successful, Err(SendError) if all receivers have been dropped + pub fn send(&self, value: T) -> Result<(), SendError<T>> { + // Check if there are any receivers left + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected(value)); + } + + // Lock the buffer - only locked during the actual send operation + let mut buffer = self.inner.buffer.lock(); + + // Check again after locking + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected(value)); + } + + // Push to the back of the queue - can't fail since we're unbounded + buffer.push_back(value); + + Ok(()) + } + + /// Sends an element to the front of the queue (highest priority) + /// Returns Ok(()) if successful, Err(SendError) if all receivers have been dropped + pub fn send_priority(&self, value: T) -> Result<(), SendError<T>> { + // Check if there are any receivers left + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return 
Err(SendError::Disconnected(value)); + } + + // Lock the buffer - only locked during the actual send operation + let mut buffer = self.inner.buffer.lock(); + + // Check again after locking + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return Err(SendError::Disconnected(value)); + } + + // Push to the front of the queue - can't fail since we're unbounded + buffer.push_front(value); + + Ok(()) + } + + /// Send a batch of elements at once + /// Returns the number of elements successfully sent (all of them, unless disconnected) + pub fn send_batch<I>(&self, items: I) -> usize + where + I: IntoIterator<Item = T>, + { + // Check if there are any receivers left + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return 0; + } + + // Lock the buffer once for the entire batch + let mut buffer = self.inner.buffer.lock(); + + // Check again after locking + if self.inner.receivers.load(Ordering::SeqCst) == 0 { + return 0; + } + + let mut count = 0; + + // Push each item to the back of the queue + for item in items { + buffer.push_back(item); + count += 1; + } + + count + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl<T> Receiver<T> { + /// Tries to receive an element from the front of the queue while blocking + /// Returns Ok(value) if successful, Err(RecvError) otherwise + pub fn recv(&self) -> Result<T, RecvError> { + loop { + match self.try_recv() { + Ok(value) => return Ok(value), + Err(RecvError::Empty) => { + // Yield to the scheduler and try again + }, + Err(err) => return Err(err), + } + } + } + + /// Tries to receive an element from the front of the queue without blocking + /// Returns Ok(value) if successful, Err(RecvError) otherwise + pub fn try_recv(&self) -> Result<T, RecvError> { + // Use a separate scope for the lock to ensure it's released promptly + let result = { + let mut 
buffer = self.inner.buffer.lock(); + buffer.pop_front() + }; + + match result { + Some(val) => Ok(val), + None => { + // Check if there are any senders left + if self.inner.senders.load(Ordering::SeqCst) == 0 { + Err(RecvError::Disconnected) + } else { + Err(RecvError::Empty) + } + } + } + } + + + /// Tries to receive multiple elements at once, up to the specified limit + /// Returns a vector of received elements + pub fn recv_batch(&self, max_items: usize) -> Vec<T> + where + T: Send, + { + // If max_items is 0, return an empty vector + if max_items == 0 { + return Vec::new(); + } + + let mut items = Vec::new(); + + // Lock the buffer once for the entire batch + let mut buffer = self.inner.buffer.lock(); + + // Calculate how many items to take + let count = max_items.min(buffer.len()); + + // Reserve capacity for efficiency + items.reserve(count); + + // Take items from the front of the queue + for _ in 0..count { + if let Some(item) = buffer.pop_front() { + items.push(item); + } else { + // This shouldn't happen due to the min() above, but just in case + break; + } + } + + items + } + + /// Peeks at the next element without removing it + pub fn peek(&self) -> Option<T> + where + T: Clone, + { + let buffer = self.inner.buffer.lock(); + buffer.front().cloned() + } + + /// Returns the current number of elements in the channel + pub fn len(&self) -> usize { + self.inner.buffer.lock().len() + } + + /// Returns true if the channel is empty + pub fn is_empty(&self) -> bool { + self.inner.buffer.lock().is_empty() + } +} + +impl<T> Drop for Sender<T> { + fn drop(&mut self) { + self.inner.senders.fetch_sub(1, Ordering::SeqCst); + } +} + +impl<T> Drop for Receiver<T> { + fn drop(&mut self) { + self.inner.receivers.fetch_sub(1, Ordering::SeqCst); + } +} + +impl<T> Default for Channel<T> { + fn default() -> Self { + Self::new() + } +} \ No newline at end of file