commit 675082592540bbadd50b6ba796c19110e299b9c6
parent 1b56a12cf8851ca6118061624edd5fcd00754674
Author: Sylvia Ivory <git@sivory.net>
Date: Fri, 27 Feb 2026 19:19:38 -0800
Pinned VM basic
Diffstat:
7 files changed, 968 insertions(+), 9 deletions(-)
diff --git a/build.zig b/build.zig
@@ -115,11 +115,11 @@ fn build_program(b: *std.Build, target: std.Build.ResolvedTarget, optimize: std.
const path = try b.path("programs").getPath3(b, null).toString(b.allocator);
defer b.allocator.free(path);
- var dir = try std.Io.Dir.openDirAbsolute(b.graph.io, path, .{ .access_sub_paths = false, .iterate = true });
- defer dir.close(b.graph.io);
+ var dir = try std.fs.openDirAbsolute(path, .{ .access_sub_paths = false, .iterate = true });
+ defer dir.close();
var iter = dir.iterate();
- while (try iter.next(b.graph.io)) |entry| {
+ while (try iter.next()) |entry| {
if (entry.kind != .file) continue;
if (!std.mem.endsWith(u8, entry.name, ".zig")) continue;
const name = entry.name[0..(entry.name.len - 4)];
diff --git a/pi/interrupts.zig b/pi/interrupts.zig
@@ -96,7 +96,10 @@ pub inline fn pending_peripheral_interrupt(i: PeripheralsInterrupt) bool {
}
fn empty(regs: Registers) void {
- _ = regs;
+ const uart = @import("./devices/mini-uart.zig");
+
+ uart.print("unexpected interrupt\n", .{});
+ dump_registers(&regs);
}
var reset_handler: *const fn (Registers) void = empty;
@@ -171,10 +174,6 @@ inline fn setup_reg(pc: u32, gp: *const [13]u32) Registers {
}
// I still hate this
-const stack_size = 256 * 16; // 16kb stacks
-pub export var exception_stack: [stack_size]usize align(8) = undefined;
-
-// Things were already cooked
export fn reset_stub(pc: u32, gp: *const [13]u32) void {
reset_handler(setup_reg(pc, gp));
}
@@ -210,7 +209,7 @@ fn create_trampoline(name: []const u8, offset: []const u8, ret: []const u8) []co
".global " ++ name ++ "\n" ++
".type " ++ name ++ ", %function\n" ++
name ++ ":\n" ++
- " ldr sp, =exception_stack\n" ++
+ " mov sp, 0x9000000\n" ++
" push {r0-r12, lr}\n" ++
" sub lr, lr, #" ++ offset ++ "\n" ++
" mov r0, lr\n" ++
diff --git a/pi/mmu.zig b/pi/mmu.zig
@@ -0,0 +1,549 @@
+pub const system = @import("./system.zig");
+pub const mem = @import("./mem.zig");
+
+pub const LockdownIndex = struct {
+ pub inline fn get() u3 {
+ return asm volatile ("MRC p15, 5, %[result], c15, c4, 2"
+ : [result] "=r" (-> u3),
+ );
+ }
+
+ pub inline fn set(index: u3) void {
+ return asm volatile ("MCR p15, 5, %[value], c15, c4, 2"
+ :
+ : [value] "r" (index),
+ );
+ }
+};
+
+pub const LockdownVA = packed struct(u32) {
+ pub const Scope = enum(u1) {
+ ApplicationSpecific = 0,
+ Global = 1,
+ };
+
+ // address space ID
+ asid: u8,
+ _reserved_8: u1,
+ scope: Scope,
+ _reserved_10_11: u2,
+ virtual_address: u20,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 5, %[result], c15, c5, 2"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(va: *const @This()) void {
+ return asm volatile ("MCR p15, 5, %[value], c15, c5, 2"
+ :
+ : [value] "r" (@as(u32, @bitCast(va.*))),
+ );
+ }
+};
+
+pub const AccessPermissions = enum(u2) {
+ TotalNoAccess = 0b00,
+ UserNoAccess = 0b01,
+ UserRead = 0b10,
+ UserReadWrite = 0b11,
+};
+
+pub const LockdownPA = packed struct(u32) {
+ pub const Size = enum(u2) {
+ @"16MB" = 0b00,
+ @"4KB" = 0b01,
+ @"64KB" = 0b10,
+ @"1MB" = 0b11,
+ };
+ pub const AccessPermissionsExtended = enum(u1) {
+ SupervisorRW = 0,
+ SupervisorRO = 1,
+ };
+ pub const Security = enum(u1) {
+ Secure = 0,
+ NonSecure = 1,
+ };
+
+ valid: bool,
+ ap0: AccessPermissions,
+ apx: AccessPermissionsExtended,
+ _reserved_5_4: u2,
+ size: Size,
+ nstid: Security,
+ nsa: Security,
+ _reserved_11_10: u2,
+ physical_address: u20,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 5, %[result], c15, c6, 2"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(pa: *const @This()) void {
+ return asm volatile ("MCR p15, 5, %[value], c15, c6, 2"
+ :
+ : [value] "r" (@as(u32, @bitCast(pa.*))),
+ );
+ }
+};
+
+pub const LockdownAttributesRegister = packed struct(u32) {
+ pub const PageTableEncodingPreset = enum(u5) {
+ StronglyOrdered = 0b000_0_0,
+ SharedDevice = 0b000_0_1,
+ OuterInnerWriteThrough = 0b000_1_0,
+ OuterInnerWriteBackNoAllocOnWrite = 0b000_1_1,
+ OuterInnerNoncacheable = 0b001_0_0,
+ OuterInnerWriteBackAllocOnWrite = 0b001_1_1,
+ NonSharedDevice = 0b010_0_0,
+ };
+
+ pub const PageTableEncodingCached = packed struct(u5) {
+ pub const CachePolicy = enum(u2) {
+ Noncacheable = 0b00,
+ WriteBackCachedWriteAlloc = 0b01,
+ WriteThroughCachedNoWriteAlloc = 0b10,
+ WriteBackCachedNoWriteAlloc = 0b11,
+ };
+
+ always_one: u1 = 1,
+ outer_policy: CachePolicy,
+ inner_policy: CachePolicy,
+ };
+
+ pub const PageTableEncodingManual = packed struct(u5) {
+ b: u1,
+ c: u1,
+ tex: u3,
+ };
+
+ pub const PageTableEncoding = packed union {
+ preset: PageTableEncodingPreset,
+ cached: PageTableEncodingCached,
+ manual: PageTableEncodingManual,
+ };
+
+ shared: bool,
+ page_table_encoding: PageTableEncoding,
+ execute_never: bool,
+ domain: u4,
+ _reserved_24_11: u14 = 0,
+ subpages_valid: bool,
+ ap1: AccessPermissions,
+ ap2: AccessPermissions,
+ ap3: AccessPermissions,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 5, %[result], c15, c7, 2"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(attr: *const @This()) void {
+ return asm volatile ("MCR p15, 5, %[value], c15, c7, 2"
+ :
+ : [value] "r" (@as(u32, @bitCast(attr.*))),
+ );
+ }
+};
+
+pub const DomainAccessControlRegister = packed struct(u32) {
+ pub const DomainAccess = enum(u2) {
+ NoAccess = 0b00,
+ Client = 0b01,
+ Manager = 0b11,
+ };
+
+ // Inconvenient but necessary to ensure ordering
+ d0: DomainAccess = .NoAccess,
+ d1: DomainAccess = .NoAccess,
+ d2: DomainAccess = .NoAccess,
+ d3: DomainAccess = .NoAccess,
+ d4: DomainAccess = .NoAccess,
+ d5: DomainAccess = .NoAccess,
+ d6: DomainAccess = .NoAccess,
+ d7: DomainAccess = .NoAccess,
+ d8: DomainAccess = .NoAccess,
+ d9: DomainAccess = .NoAccess,
+ d10: DomainAccess = .NoAccess,
+ d11: DomainAccess = .NoAccess,
+ d12: DomainAccess = .NoAccess,
+ d13: DomainAccess = .NoAccess,
+ d14: DomainAccess = .NoAccess,
+ d15: DomainAccess = .NoAccess,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 0, %[result], c3, c0, 0"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(domains: *const @This()) void {
+ return asm volatile ("MCR p15, 0, %[value], c3, c0, 0"
+ :
+ : [value] "r" (@as(u32, @bitCast(domains.*))),
+ );
+ }
+
+ pub inline fn set_all(access: DomainAccess) void {
+ const domains: @This() = .{
+ .d0 = access,
+ .d1 = access,
+ .d2 = access,
+ .d3 = access,
+ .d4 = access,
+ .d5 = access,
+ .d6 = access,
+ .d7 = access,
+ .d8 = access,
+ .d9 = access,
+ .d10 = access,
+ .d11 = access,
+ .d12 = access,
+ .d13 = access,
+ .d14 = access,
+ .d15 = access,
+ };
+
+ domains.set();
+ mem.barrier(.Write);
+ }
+};
+
+pub const TLBTypeRegister = packed struct(u32) {
+ unified: bool,
+ _reserved_7_1: u7,
+ data_lockable_size: u8,
+ instruction_lockable_size: u8,
+ _reserved_31_24: u8,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 0, %[result], c0, c0, 3"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(domains: *const @This()) void {
+ return asm volatile ("MCR p15, 0, %[value], c0, c0, 3"
+ :
+ : [value] "r" (@as(u32, @bitCast(domains.*))),
+ );
+ }
+};
+
+pub const OuterCacheableAttr = enum(u2) {
+ OuterNoncacheable = 0b00,
+ WriteBackWriteAlloc = 0b01,
+ WriteThrough = 0b10,
+ WriteBackNoWriteAlloc = 0b11,
+};
+
+pub const TranslationTableBaseRegister0 = packed struct(u32) {
+ // // This is pain
+ // pub const TranslationTableBase = packed union {
+ // @"16KB": packed struct {
+ // translation_table_base: u15,
+ // _reserved_13_5: u9,
+ // },
+ // @"8KB": packed struct {
+ // translation_table_base: u16,
+ // _reserved_12_5: u8,
+ // },
+ // @"4KB": packed struct {
+ // translation_table_base: u17,
+ // _reserved_11_5: u7,
+ // },
+ // @"2KB": packed struct {
+ // translation_table_base: u18,
+ // _reserved_10_5: u6,
+ // },
+ // @"1KB": packed struct {
+ // translation_table_base: u19,
+ // _reserved_9_5: u5,
+ // },
+ // @"512B": packed struct {
+ // translation_table_base: u20,
+ // _reserved_8_5: u4,
+ // },
+ // @"256B": packed struct {
+ // translation_table_base: u21,
+ // _reserved_7_5: u3,
+ // },
+ // @"128B": packed struct {
+ // translation_table_base: u22,
+ // _reserved_6_5: u2,
+ // },
+ // };
+
+ inner_cacheable: bool,
+ shared: bool,
+ ecc: bool,
+ outer_cacheable_attr: OuterCacheableAttr,
+ translation_table_base: u27,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 0, %[result], c2, c0, 0"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(ttbc: *const @This()) void {
+ return asm volatile ("MCR p15, 0, %[value], c2, c0, 0"
+ :
+ : [value] "r" (@as(u32, @bitCast(ttbc.*))),
+ );
+ }
+};
+
+pub const TranslationTableBaseRegister1 = packed struct(u32) {
+ inner_cacheable: bool,
+ shared: bool,
+ ecc: bool,
+ outer_cacheable_attr: OuterCacheableAttr,
+ // _reserved_13_5: u9,
+ translation_table_base: u27,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 0, %[result], c2, c0, 1"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn clear() void {
+ const zero: @This() = @bitCast(@as(u32, 0));
+ zero.set();
+ }
+
+ pub inline fn set(ttbc: *const @This()) void {
+ return asm volatile ("MCR p15, 0, %[value], c2, c0, 1"
+ :
+ : [value] "r" (@as(u32, @bitCast(ttbc.*))),
+ );
+ }
+};
+
+pub const TranslationTableBaseControl = packed struct(u32) {
+ pub const BoundarySize = enum(u3) {
+ @"16KB" = 0b000,
+ @"8KB" = 0b001,
+ @"4KB" = 0b010,
+ @"2KB" = 0b011,
+ @"1KB" = 0b100,
+ @"512B" = 0b101,
+ @"256B" = 0b110,
+ @"128B" = 0b111,
+ };
+
+ boundary_size: BoundarySize,
+ _reserved_3: u1,
+ page_table_walk_0: u1,
+ page_table_walk_1: u1,
+ _reserved_31_6: u26,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 0, %[result], c2, c0, 2"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(ttbc: *const @This()) void {
+ return asm volatile ("MCR p15, 0, %[value], c2, c0, 2"
+ :
+ : [value] "r" (@as(u32, @bitCast(ttbc.*))),
+ );
+ }
+};
+
+pub const ContextId = packed struct(u32) {
+ asid: u8 = 0,
+ pid: u24 = 0,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 0, %[result], c13, c0, 1"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(ctx: *const @This()) void {
+ return asm volatile ("MCR p15, 0, %[value], c13, c0, 1"
+ :
+ : [value] "r" (@as(u32, @bitCast(ctx.*))),
+ );
+ }
+
+ pub inline fn clear() void {
+ const ctx: ContextId = .{};
+ ctx.set();
+ }
+};
+
+pub const TranslationMode = enum {
+ PrivilegedRead,
+ PrivilegedWrite,
+ UserRead,
+ UserWrite,
+};
+
+pub inline fn va_translation_cw(va: u32, comptime mode: TranslationMode) u32 {
+ const mode_asm = switch (mode) {
+ .PrivilegedRead => "0",
+ .PrivilegedWrite => "1",
+ .UserRead => "2",
+ .UserWrite => "3",
+ };
+
+ asm volatile ("MCR p15, 0, %[value], c7, c8, " ++ mode_asm
+ :
+ : [value] "r" (va),
+ );
+
+ return asm volatile ("MRC p15, 0, %[result], c7, c4, 0"
+ : [result] "=r" (-> u32),
+ );
+}
+
+pub fn init() void {
+ // Reset MMU
+ reset();
+
+ // Read-Modify-Write XP in CP1
+ // Ensure MMU is disabled
+ var reg = system.SystemControlRegister.get();
+ reg.extended_page_tables = true;
+ reg.mmu_enabled = false;
+ reg.set();
+}
+
+// 1. Invalidate all caches
+// 2. B2 (memory) ordering operations
+pub fn reset() void {
+ // Instruction Cache/Data Cache
+ system.invalidate_all_caches();
+ // ITLB/DTLB
+ system.invalidate_TLB();
+
+ // B2 ordering
+ mem.barrier(.Write);
+
+ system.flush_btb();
+
+ mem.barrier(.Write);
+ mem.barrier(.Instruction);
+}
+
+// 6.4.1 - Enabling the MMU
+// 1. Program all relevant CP15 registers
+// 2. Program first-level and second-level page tables as required
+// 3. Disable and invalidate instruction cache
+// 4. Enable MMU by setting bit 0 in System Control Register
+// 5. Optionally reenable instruction cache
+pub noinline fn enable() void {
+ // 1. Program all relevant CP15 registers
+ // Should be done by reset/user
+
+ // 2. Program first-level and second-level page tables
+ // Should be done by user
+
+ // 3. Disable instruction cache
+ var reg = system.SystemControlRegister.get();
+ reg.level_one_instruction_cache = false;
+ reg.set();
+
+ // 3. Invalidate instruction cache
+ mem.barrier(.Instruction);
+ system.invalidate_icache();
+ // Ensure completion of instruction cache maintenance
+ mem.barrier(.Write);
+
+ // 4. Enable MMU
+ reg.mmu_enabled = true;
+ reg.set();
+
+ // Ensure completion of enabling
+ mem.barrier(.Write);
+ mem.barrier(.Instruction);
+ // Required after enabling MMU
+ system.flush_btb();
+ // Required after BTB flush
+ mem.barrier(.Instruction);
+}
+
+// 6.4.2 - Disabling the MMU
+// 1. Clear bit 2 to 0 in System Control Register (level one data cache)
+// 2. Clear bit 0 to 0 in System Control Register (MMU Enabled)
+pub noinline fn disable() void {
+ // 0. Clear cache so entries are properly written
+ system.clear_entire_data_cache();
+ mem.barrier(.Write);
+
+ var reg = system.SystemControlRegister.get();
+
+ // 1. Clear bit 2 to 0
+ reg.level_one_data_cache = false;
+ reg.set();
+
+ // Invalidate instruction cache
+ system.invalidate_icache();
+
+ // 2. Clear bit 0 to 0
+ reg.mmu_enabled = false;
+ reg.set();
+
+ // BTB is to be flushed after any change to MMU
+ system.flush_btb();
+ // Ensure sync of MMU maintenance operation (turning it off)
+ mem.barrier(.Write);
+ // Prefetch flush is required after BTB flush
+ mem.barrier(.Instruction);
+}
+
+pub fn is_enabled() bool {
+ return system.SystemControlRegister.get().mmu_enabled;
+}
+
+pub fn set_context(ctx: ContextId) void {
+ // 3-129
+ // "You must ensure that software performs a Data Synchronization
+ // Barrier operation before changes to this register."
+ mem.barrier(.Write);
+
+ ctx.set();
+
+ // "You must execute an IMB instruction immediately after changes
+ // to the Context ID Register."
+ system.flush_btb();
+ mem.barrier(.Instruction);
+}
+
+// B2-25 Synchronization of Changes of ASID and TTBR
+// 1. Change ASID to 0
+// 2. Prefetch Flush
+// 3. Change TTRB
+// 4. Prefetch Flush
+// 5. Change ASID to new value
+pub fn set_context_ttbr0(ctx: ContextId, ttrb0: TranslationTableBaseRegister0) void {
+ // 1. Change ASID to 0
+ const cleared: ContextId = .{};
+ set_context(cleared);
+ // 2. set_context already does a prefetch flush
+
+ // 3. Change TTRB0
+ ttrb0.set();
+ // Zero out TTRB1 as we're not using it
+ TranslationTableBaseRegister1.clear();
+
+ // Invalidate BTB after any change to TTRB0
+ system.flush_btb();
+ // 4. Prefetch flush required after BTB flush
+ mem.barrier(.Instruction);
+
+ // 5. Change ASID to new value
+ ctx.set();
+
+ // BTB flush required after change to ContextID
+ system.flush_btb();
+ mem.barrier(.Instruction);
+}
diff --git a/pi/pinned.zig b/pi/pinned.zig
@@ -0,0 +1,132 @@
+const system = @import("./system.zig");
+const mmu = @import("./mmu.zig");
+const mem = @import("./mem.zig");
+
+const Error = error{
+ MMUEnabled,
+ MMUDisabled,
+ InvalidMVA,
+ TranslationAborted,
+ BadSetVA,
+ BadSetAttr,
+ BadSetPA,
+};
+
+// Invalidate page table
+const null_pages: [4096 * 4]u8 align(1 << 14) = .{0} ** (4096 * 4);
+pub fn init(domain: mmu.DomainAccessControlRegister) Error!void {
+ if (mmu.is_enabled()) return Error.MMUEnabled;
+
+ // TODO
+ _ = domain;
+}
+
+// Map VA -> PA at IDX with attributes E
+pub fn set(idx: u3, va: usize, pa: usize, attr: PinnedAttribute) Error!void {
+ const cs = mem.enter_critical_section();
+ defer cs.exit();
+
+ // Initialize lockdown values
+ const lockdown_attr: mmu.LockdownAttributesRegister = .{
+ .shared = true,
+ .domain = attr.domain,
+ .page_table_encoding = .{ .preset = attr.mem_attributes },
+ // 0 initialize everything else
+ .execute_never = false,
+ .ap1 = .TotalNoAccess,
+ .ap2 = .TotalNoAccess,
+ .ap3 = .TotalNoAccess,
+ .subpages_valid = false,
+ };
+
+ // Make user pointers into real values
+ var lockdown_va: mmu.LockdownVA = @bitCast(va);
+ lockdown_va.asid = attr.asid;
+ lockdown_va.scope = attr.scope;
+
+ var lockdown_pa: mmu.LockdownPA = @bitCast(pa);
+ lockdown_pa.nsa = .Secure;
+ lockdown_pa.nstid = .Secure;
+ lockdown_pa.valid = true;
+ lockdown_pa.size = .@"1MB";
+ lockdown_pa.ap0 = attr.permission;
+ lockdown_pa.apx = attr.permission_x;
+
+ mmu.LockdownIndex.set(idx);
+
+ lockdown_va.set();
+ lockdown_attr.set();
+ lockdown_pa.set();
+
+ // Update to TLB, need prefetch flush
+ mem.barrier(.Instruction);
+
+ // Validate
+ if (mmu.LockdownVA.get() != lockdown_va) return Error.BadSetVA;
+ if (mmu.LockdownAttributesRegister.get() != lockdown_attr) return Error.BadSetAttr;
+ if (mmu.LockdownPA.get() != lockdown_pa) return Error.BadSetPA;
+}
+
+// Clear IDX
+pub fn clear(idx: u3) Error!void {
+ mmu.LockdownIndex.set(idx);
+
+ // Zero out everything
+ mmu.LockdownVA.set(@bitCast(0));
+ mmu.LockdownAttributesRegister.set(@bitCast(0));
+ mmu.LockdownPA.set(@bitCast(0));
+
+ // Update to TLB, need prefetch flush
+ mem.barrier(.Instruction);
+
+ // Validate
+ if (mmu.LockdownVA.get() != 0) return Error.BadSetVA;
+ if (mmu.LockdownAttributesRegister.get() != 0) return Error.BadSetAttr;
+ if (mmu.LockdownPA.get() != 0) return Error.BadSetPA;
+}
+
+// Get VA -> PA
+pub fn get(va: u32) Error!u32 {
+ if (!mmu.is_enabled()) return Error.MMUDisabled;
+ // 3-79: MVA 0:2 SBZ
+ if (va & 0b11 != 0) return Error.InvalidMVA;
+
+ const pa = mmu.va_translation_cw(va, .PrivilegedRead);
+ if (pa & 0b1 == 1) return Error.TranslationAborted;
+
+ return pa;
+}
+
+// Check if VA == PA
+pub fn is_pinned(va: u32) Error!bool {
+ return (try get(va)) == va;
+}
+
+pub const PinnedAttribute = struct {
+ asid: u8,
+ domain: u4,
+ scope: mmu.LockdownVA.Scope,
+ permission: mmu.AccessPermissions,
+ permission_x: mmu.LockdownPA.AccessPermissionsExtended,
+ mem_attributes: mmu.LockdownAttributesRegister.PageTableEncodingPreset,
+};
+
+const uart = @import("./devices/mini-uart.zig");
+pub fn lockdown_print_entry(idx: u3) void {
+ mmu.LockdownIndex.set(idx);
+ const pa = mmu.LockdownPA.get();
+ const va = mmu.LockdownVA.get();
+ const attr = mmu.LockdownAttributesRegister.get();
+
+ uart.print(" PA = 0x{X} (valid={any} size={s} ap={s} apx={s})\n", .{ pa.physical_address, pa.valid, @tagName(pa.size), @tagName(pa.ap0), @tagName(pa.apx) });
+ uart.print(" VA = 0x{X} (asid={d} scope={s})\n", .{ va.virtual_address, va.asid, @tagName(va.scope) });
+ uart.print(" Attributes: domain={d} XN={any} shared={any} tex={d} c={d} b={d}\n", .{ attr.domain, attr.execute_never, attr.shared, attr.page_table_encoding.manual.tex, attr.page_table_encoding.manual.c, attr.page_table_encoding.manual.b });
+ uart.flush();
+}
+
+pub fn lockdown_print_entries() void {
+ for (0..8) |i| {
+ uart.print("Entry {d}:\n", .{i});
+ lockdown_print_entry(@truncate(i));
+ }
+}
diff --git a/pi/root.zig b/pi/root.zig
@@ -9,8 +9,11 @@ pub const register = @import("./register.zig");
pub const journal = @import("./journal.zig");
pub const faults = @import("./faults.zig");
pub const thread = @import("./thread.zig");
+pub const pinned = @import("./pinned.zig");
+pub const system = @import("./system.zig");
pub const debug = @import("./debug.zig");
pub const mem = @import("./mem.zig");
+pub const mmu = @import("./mmu.zig");
pub const devices = struct {
pub const clock = @import("./devices/clock.zig");
@@ -21,6 +24,9 @@ pub const devices = struct {
pub const mailbox = @import("./devices/mailbox.zig");
};
+pub export const STACK_ADDRESS: usize = 0x8000000;
+pub export const STACK_INTERRUPT_ADDRESS: usize = 0x9000000;
+
pub inline fn cycle_counter_init() void {
asm volatile ("MCR p15, 0, %[in], c15, c12, 0"
:
diff --git a/pi/system.zig b/pi/system.zig
@@ -0,0 +1,150 @@
+const mem = @import("./mem.zig");
+
+pub const SystemControlRegister = packed struct {
+ pub const Endian = enum(u1) {
+ LittleEndian = 0,
+ BigEndian = 1,
+ };
+
+ pub const ExceptionVectorBaseAddress = enum(u1) {
+ Normal = 0,
+ High = 1,
+ };
+
+ pub const CacheReplacementStrategy = enum(u1) {
+ Random = 0,
+ RoundRobin = 1,
+ };
+
+ mmu_enabled: bool,
+ strict_alignment: bool,
+ level_one_data_cache: bool,
+ _reserved_3: bool,
+ _reserved_6_4: u3,
+ endian: Endian,
+ mmu_protection: bool,
+ rom_protection: bool,
+ _reserved_10: bool,
+ branch_prediction: bool,
+ level_one_instruction_cache: bool,
+ exception_vector_base_address: ExceptionVectorBaseAddress,
+ cache_replacement_strategy: CacheReplacementStrategy,
+ set_t_bit: bool,
+ _reserved_16: bool,
+ _deprecated_17: bool,
+ _reserved_18: bool,
+ _deprecated_18: bool,
+ _reserved_20_19: bool,
+ low_latency_fiq: bool,
+ unaligned_data_access: bool,
+ extended_page_tables: bool,
+ vic_interface: bool,
+ cpsr_e_bit: bool,
+ _reserved_27_26: u2,
+ tex_remap: bool,
+ force_access_protection: bool,
+ _reserved_31_30: u2,
+
+ pub inline fn get() @This() {
+ return asm volatile ("MRC p15, 0, %[result], c1, c0, 0"
+ : [result] "=r" (-> @This()),
+ );
+ }
+
+ pub inline fn set(reg: *const @This()) void {
+ return asm volatile ("MCR p15, 0, %[value], c1, c0, 0"
+ :
+ : [value] "r" (@as(u32, @bitCast(reg.*))),
+ );
+ }
+};
+
+inline fn invalidate_entire_instruction_cache() void {
+ asm volatile ("MCR p15, 0, %[value], c7, c5, 0"
+ :
+ : [value] "r" (0),
+ );
+}
+
+inline fn invalidate_entire_data_cache() void {
+ asm volatile ("MCR p15, 0, %[value], c7, c6, 0"
+ :
+ : [value] "r" (0),
+ );
+}
+
+pub inline fn clear_entire_data_cache() void {
+ asm volatile ("MCR p15, 0, %[value], c7, c14, 0"
+ :
+ : [value] "r" (0),
+ );
+}
+
+// Probably affected by ARM bug too
+// inline fn invalidate_both_caches() void {
+// asm volatile ("MCR p15, 0, %[value], c7, c7, 0"
+// :
+// : [value] "r" (0),
+// );
+// }
+
+pub inline fn invalidate_instruction_TLB() void {
+ asm volatile ("MCR p15, 0, %[value], c8, c5, 0"
+ :
+ : [value] "r" (0),
+ );
+}
+
+pub inline fn invalidate_data_TLB() void {
+ asm volatile ("MCR p15, 0, %[value], c8, c6, 0"
+ :
+ : [value] "r" (0),
+ );
+}
+
+pub inline fn invalidate_TLB() void {
+ asm volatile ("MCR p15, 0, %[value], c8, c7, 0"
+ :
+ : [value] "r" (0),
+ );
+}
+
+// https://elixir.bootlin.com/linux/latest/source/arch/arm/mm/cache-v6.S
+// How TF does this even happen
+pub inline fn invalidate_icache() void {
+ invalidate_entire_instruction_cache();
+ invalidate_entire_instruction_cache();
+ invalidate_entire_instruction_cache();
+ invalidate_entire_instruction_cache();
+
+ asm volatile (
+ \\ .rept 11
+ \\ nop
+ \\ .endr
+ );
+}
+
+pub inline fn invalidate_dcache() void {
+ invalidate_entire_data_cache();
+}
+
+pub inline fn invalidate_all_caches() void {
+ invalidate_dcache();
+ invalidate_icache();
+}
+
+/// Call after:
+///
+/// 1. Enabling/Disabling MMU
+/// 2. Writing new data to instruction memory
+/// 3. Writing new mapping to PTE
+/// 4. Changing TTBR0, TTBR1, TTBCR
+/// 5. Changing context ID
+///
+/// Always perform a prefetch buffer afterwards
+pub inline fn flush_btb() void {
+ asm volatile ("mcr p15, 0, %[value], c7, c5, 6"
+ :
+ : [value] "r" (0),
+ );
+}
diff --git a/programs/pinned-basic.zig b/programs/pinned-basic.zig
@@ -0,0 +1,123 @@
+const std = @import("std");
+const pi = @import("pi");
+
+const interrupts = pi.interrupts;
+const faults = pi.faults;
+const pinned = pi.pinned;
+const system = pi.system;
+const mmu = pi.mmu;
+const mem = pi.mem;
+
+const uart = pi.devices.mini_uart;
+
+inline fn mb(n: usize) usize {
+ return n * 1024 * 1024;
+}
+
+const Segments = struct {
+ const Code = mb(0);
+ const Heap = mb(1);
+ const Stack = pi.STACK_ADDRESS - mb(1);
+ const StackInterrupts = pi.STACK_INTERRUPT_ADDRESS - mb(1);
+ const BCM0 = mem.BASE_ADDRESS;
+ const BCM1 = mem.BASE_ADDRESS + mb(1);
+ const BCM2 = mem.BASE_ADDRESS + mb(2);
+
+ const Illegal = mb(2);
+};
+
+fn data_abort_handler(regs: interrupts.Registers) void {
+ _ = regs;
+
+ const far = faults.FAR.get();
+
+ uart.print("got fault on 0x{X}\n", .{far});
+ pi.reboot();
+}
+
+pub fn main() !void {
+ interrupts.set_exception_handler(.DataAbort, data_abort_handler);
+ uart.print("Set data abort!\n", .{});
+
+ const dev: pinned.PinnedAttribute = .{
+ .asid = 0,
+ .domain = 1,
+ .scope = .Global,
+ .permission = .UserNoAccess,
+ .permission_x = .SupervisorRW,
+ .mem_attributes = .StronglyOrdered,
+ };
+ const kern: pinned.PinnedAttribute = .{
+ .asid = 0,
+ .domain = 1,
+ .scope = .Global,
+ .permission = .UserNoAccess,
+ .permission_x = .SupervisorRW,
+ .mem_attributes = .OuterInnerNoncacheable,
+ };
+
+ mmu.init();
+ uart.print("Initialized MMU!\n", .{});
+
+ try pinned.set(0, @bitCast(Segments.Code), @bitCast(Segments.Code), kern);
+ try pinned.set(1, @bitCast(Segments.Heap), @bitCast(Segments.Heap), kern);
+ try pinned.set(2, @bitCast(Segments.Stack), @bitCast(Segments.Stack), kern);
+ try pinned.set(3, @bitCast(Segments.StackInterrupts), @bitCast(Segments.StackInterrupts), kern);
+ try pinned.set(4, @bitCast(Segments.BCM0), @bitCast(Segments.BCM0), dev);
+ try pinned.set(5, @bitCast(Segments.BCM1), @bitCast(Segments.BCM1), dev);
+ try pinned.set(6, @bitCast(Segments.BCM2), @bitCast(Segments.BCM2), dev);
+
+ uart.print("Setup TLB!\n", .{});
+
+ const dacr: mmu.DomainAccessControlRegister = .{
+ .d1 = .Client,
+ };
+ dacr.set();
+
+ // mmu.set_context(.{
+ // .asid = 1,
+ // .pid = 128,
+ // });
+ var null_ptr: [4096 * 4]u8 align(1 << 14) = .{0} ** (4096 * 4);
+ mmu.set_context_ttbr0(.{
+ .asid = 1,
+ .pid = 128 << 8,
+ }, @bitCast(@intFromPtr(&null_ptr)));
+
+ uart.print("Set context!\n", .{});
+
+ pinned.lockdown_print_entries();
+
+ for (0..10) |i| {
+ mmu.enable();
+
+ if (mmu.is_enabled()) {
+ uart.print("MMU ON: hello from virtual memory! count = {}\n", .{i});
+ } else {
+ uart.print("MMU is off?\n", .{});
+ }
+
+ mmu.disable();
+
+ if (!mmu.is_enabled()) {
+ uart.print("MMU is off!\n", .{});
+ } else {
+ uart.print("MMU is on?\n", .{});
+ }
+ }
+
+ const illegal: *u32 = @ptrFromInt(Segments.Illegal);
+
+ mem.put_u32(illegal, 0xdeadbeef);
+
+ uart.print("wrote without vm: got 0x{X}\n", .{mem.get_u32(illegal)});
+
+ mmu.enable();
+
+ uart.print("mmu.is_enabled() = {any}\n", .{mmu.is_enabled()});
+ uart.print("about to write to 0x{X} (illegal)\n", .{Segments.Illegal});
+
+ mem.put_u32(illegal, 0xdeadbeef);
+
+ uart.print("should be unreachable\n", .{});
+}