commit e69b68e0abbb1777e8d83267057d60861049c7c5
parent 1015568f100147044a06db957771b48b3ebb071c
Author: Sylvia Ivory <git@sivory.net>
Date: Sat, 14 Mar 2026 02:24:04 -0700
Add page mapper
Diffstat:
5 files changed, 247 insertions(+), 10 deletions(-)
diff --git a/Justfile b/Justfile
@@ -12,6 +12,8 @@ build program mode="Bootable":
check program="all":
if [ "{{ program }}" = "all" ]; then \
zig build -Drelease=false; \
+ elif [ "{{ program }}" = "sylveos" ]; then \
+ zig build -Drelease=false -Dprogram={{ program }}; \
else \
zig build -Dmode=Bootable -Drelease=false -Dprogram={{ program }}; \
fi \
diff --git a/pi/mmu.zig b/pi/mmu.zig
@@ -360,9 +360,11 @@ pub const FirstLevelDescriptor = packed struct(u32) {
_ignore_31_2: u30 = 0,
};
pub const Coarse = packed struct(u30) {
- _reserved_4_2: u3,
+ _reserved_0: u1 = 0,
+ not_secure: bool = false,
+ _reserved_1: u1 = 0,
domain: u4,
- _implementation_9: u1 = 0,
+ ecc: bool = false,
coarse_base_address: u22,
};
pub const Section = packed struct(u30) {
@@ -370,14 +372,14 @@ pub const FirstLevelDescriptor = packed struct(u32) {
c: u1,
never_execute: bool,
domain: u4,
- _implementation_9: u1 = 0,
+ ecc: bool = false,
ap: AccessPermissions,
tex: u3,
apx: AccessPermissionsExtended,
shared: bool,
not_global: bool,
is_supersection: bool = false,
- _reserved_19: u1 = 0,
+ not_secure: bool = false,
section_base_address: u12,
};
pub const SuperSection = packed struct(u30) {
@@ -385,14 +387,14 @@ pub const FirstLevelDescriptor = packed struct(u32) {
c: u1,
never_execute: bool,
base_address_39_36: u4,
- _implementation_9: u1,
+ ecc: bool = false,
ap: AccessPermissions,
tex: u3,
apx: AccessPermissionsExtended,
shared: bool,
not_global: bool,
is_supersection: bool = true,
- _reserved_19: u1 = 0,
+ not_secure: bool = false,
base_address_35_32: u4,
section_base_address: u8,
};
@@ -412,6 +414,47 @@ pub const FirstLevelDescriptor = packed struct(u32) {
},
};
+pub const SecondLevelDescriptor = packed struct(u32) {
+ pub const Fault = packed struct(u30) {
+ _ignore_31_2: u30 = 0,
+ };
+ pub const LargePage = packed struct(u30) {
+ b: u1,
+ c: u1,
+ ap: AccessPermissions,
+ _reserved_8_6: u3 = 0,
+ apx: AccessPermissionsExtended,
+ shared: bool,
+ not_global: bool,
+ tex: u3,
+ never_execute: bool,
+ base_address: u16,
+ };
+ pub const SmallPage = packed struct(u30) {
+ b: u1,
+ c: u1,
+ ap: AccessPermissions,
+ tex: u3,
+ apx: AccessPermissionsExtended,
+ shared: bool,
+ not_global: bool,
+ base_address: u20,
+ };
+
+ pub const Type = enum(u2) {
+ Fault = 0b00,
+ LargePage = 0b01,
+ SmallPageNeverExecute = 0b11,
+ SmallPageExecutable = 0b10,
+ };
+ ty: Type,
+ descriptor: packed union {
+ fault: Fault,
+ large_page: LargePage,
+ small_page: SmallPage,
+ },
+};
+
pub const TranslationMode = enum {
PrivilegedRead,
PrivilegedWrite,
diff --git a/sylveos/memory.zig b/sylveos/memory.zig
@@ -177,11 +177,16 @@ pub fn print_regions_virtual() void {
pub fn get_allocator() std.mem.Allocator {
const heap = Region.heap();
- var fba: std.heap.FixedBufferAllocator = .init(heap.raw[0..heap.size]);
+ var fba: std.heap.FixedBufferAllocator = .init(@ptrCast(heap.raw[1..heap.size]));
return fba.allocator();
}
+pub fn get_page_table() []mmu.FirstLevelDescriptor {
+ var page_table_raw: [*]mmu.FirstLevelDescriptor = @ptrFromInt(Region.page_table().start);
+ return page_table_raw[0..4096];
+}
+
pub fn translate(va: u32) !u32 {
const res = mmu.va_translation_cw(va, .PrivilegedRead);
if (res.aborted) return error.FailedTranslation;
diff --git a/sylveos/pages.zig b/sylveos/pages.zig
@@ -0,0 +1,179 @@
+const std = @import("std");
+const pi = @import("pi");
+
+const mmu = pi.mmu;
+const pt = pi.pt;
+
+const SECOND_LEVEL_FAULT: mmu.SecondLevelDescriptor = .{ .ty = .Fault, .descriptor = .{ .fault = .{} } };
+const SectionType = enum {
+ Large,
+ Small,
+};
+const LargePageSection = struct {
+ node: std.SinglyLinkedList.Node,
+ ty: SectionType,
+ descriptor: [16]mmu.SecondLevelDescriptor align(1 << 10),
+};
+const SmallPageSection = struct {
+ node: std.SinglyLinkedList.Node,
+ ty: SectionType,
+ descriptor: [1024]mmu.SecondLevelDescriptor align(1 << 10),
+};
+
+alloc: std.mem.Allocator,
+root: []mmu.FirstLevelDescriptor,
+sectors: []mmu.FirstLevelDescriptor,
+// TODO; this should be a linked list as they don't need to be contiguous
+pages: std.SinglyLinkedList,
+asid: u8,
+
+pub const Error = error{
+ InvalidSection,
+ InvalidLargePage,
+ InvalidSmallPage,
+ TossingCoarsePage,
+ TossingSection,
+ Map64To4,
+ Map4To64,
+} || std.mem.Allocator.Error;
+
+// XKCD 927
+pub const Attributes = struct {
+ encoding: mmu.LockdownAttributesRegister.PageTableEncoding = .{ .preset = .StronglyOrdered },
+ access_permission: mmu.AccessPermissions = .UserReadWrite,
+ access_permission_x: mmu.AccessPermissionsExtended = .SupervisorRW,
+ scope: mmu.LockdownVA.Scope = .Global,
+ never_execute: bool = false,
+ shared: bool = false,
+ domain: u4 = 1,
+};
+
+const Self = @This();
+
+pub fn fork(alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, asid: u8) !Self {
+ return .{
+ .sectors = pt.dupe(alloc, root),
+ .pages = .{},
+ .alloc = alloc,
+ .root = root,
+ .asid = asid,
+ };
+}
+
+fn va_to_index(va: u32) usize {
+ return @intCast(va >> 20);
+}
+
+pub fn map_1mb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
+ // Whole section, yippee
+ // Just check we aren't chucking a coarse page
+ const idx = va_to_index(va);
+ var current = &self.sectors[idx];
+
+ if (current.ty == .CoarsePageTable) return Error.TossingCoarsePage;
+
+ const mem_attr = attr.encoding.manual;
+
+ current.ty = .Section;
+ current.descriptor = .{ .section = .{
+ .not_global = if (attr.scope == .Global) false else true,
+ .ap = attr.access_permission,
+ .apx = attr.access_permission_x,
+ .b = mem_attr.b,
+ .c = mem_attr.c,
+ .tex = mem_attr.tex,
+ .domain = attr.domain,
+ .never_execute = attr.never_execute,
+ .shared = attr.shared,
+ .section_base_address = @truncate(pa >> 20),
+ } };
+ // TODO; only do 1 line
+ mmu.sync_pte();
+}
+// Assume user isn't stupid and tries to map 4kb then 64kb
+pub fn map_64kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
+ const idx = va_to_index(va);
+ var current = &self.sectors[idx];
+
+ if (current.ty == .Section) return Error.TossingSection;
+
+ if (current.ty == .Fault) {
+ // Need to alloc LargePageSection
+ var section = try self.alloc.create(LargePageSection);
+ section.ty = .Large;
+ @memset(&section.descriptor, SECOND_LEVEL_FAULT);
+ self.pages.prepend(§ion.node);
+
+ current.ty = .CoarsePageTable;
+ current.descriptor = .{ .coarse = .{
+ .domain = attr.domain,
+ .coarse_base_address = @truncate(@intFromPtr(&section.descriptor) >> 10),
+ } };
+ }
+
+ var descriptor: *[16]mmu.SecondLevelDescriptor align(1 << 10) = @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
+ const sector: *LargePageSection = @fieldParentPtr("descriptor", descriptor);
+ if (sector.ty != .Large) return Error.Map64To4;
+
+ // Large-page index: VA bits [19:16] select one of 16 64KiB pages in the 1MiB section
+ const page_idx: u4 = @truncate(va >> 16);
+ const mem_attr = attr.encoding.manual;
+
+ descriptor[page_idx].ty = .LargePage;
+ descriptor[page_idx].descriptor.large_page = .{
+ .not_global = if (attr.scope == .Global) false else true,
+ .ap = attr.access_permission,
+ .apx = attr.access_permission_x,
+ .b = mem_attr.b,
+ .c = mem_attr.c,
+ .tex = mem_attr.tex,
+ .never_execute = attr.never_execute,
+ .shared = attr.shared,
+ .base_address = @truncate(pa >> 16),
+ };
+ // TODO; only do 1 line
+ mmu.sync_pte();
+}
+pub fn map_4kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
+ const idx = va_to_index(va);
+ var current = &self.sectors[idx];
+
+ if (current.ty == .Section) return Error.TossingSection;
+
+ if (current.ty == .Fault) {
+ // Need to alloc SmallPageSection
+ var section = try self.alloc.create(SmallPageSection);
+ section.ty = .Small;
+ @memset(&section.descriptor, SECOND_LEVEL_FAULT);
+ self.pages.prepend(§ion.node);
+
+ current.ty = .CoarsePageTable;
+ current.descriptor = .{ .coarse = .{
+ .domain = attr.domain,
+ .coarse_base_address = @truncate(@intFromPtr(&section.descriptor) >> 10),
+ } };
+ }
+
+ var descriptor: *[1024]mmu.SecondLevelDescriptor align(1 << 10) = @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
+ const sector: *SmallPageSection = @fieldParentPtr("descriptor", descriptor);
+ if (sector.ty != .Small) return Error.Map4To64;
+
+ // Small-page index: VA bits [19:12] select one of 256 4KiB pages in the 1MiB section
+ const page_idx: u8 = @truncate(va >> 12);
+ const mem_attr = attr.encoding.manual;
+
+ if (attr.never_execute) {
+ descriptor[page_idx].ty = .SmallPageNeverExecute;
+ } else {
+ descriptor[page_idx].ty = .SmallPageExecutable;
+ }
+
+ descriptor[page_idx].descriptor.small_page = .{
+ .not_global = if (attr.scope == .Global) false else true,
+ .ap = attr.access_permission,
+ .apx = attr.access_permission_x,
+ .b = mem_attr.b,
+ .c = mem_attr.c,
+ .tex = mem_attr.tex,
+ .shared = attr.shared,
+ .base_address = @truncate(pa >> 12),
+ };
+ // TODO; only do 1 line
+ mmu.sync_pte();
+}
diff --git a/sylveos/root.zig b/sylveos/root.zig
@@ -46,9 +46,7 @@ export fn kmain(memory_size: u32) void {
interrupts.set_exception_handler(.DataAbort, abort_handler);
interrupts.set_exception_handler(.PrefetchAbort, abort_handler);
- var page_table_raw: [*]mmu.FirstLevelDescriptor = @ptrFromInt(Region.page_table().start);
- const pt = page_table_raw[0..4096];
-
+ const pt = memory.get_page_table();
pi.pt.remove(pt, 0x0000_0000) catch {};
pi.pt.remove(pt, 0x0010_0000) catch {};
pi.pt.remove(pt, 0x0020_0000) catch {};
@@ -63,6 +61,11 @@ export fn kmain(memory_size: u32) void {
memory.print_regions_virtual();
+ // TODO; set TTBCR to be N=3 so that
+ // TTBR0 is 512 entries, mapping 512MiB
+ // TTBR1 is OS
+ // Cheaper context switches
+
main() catch |e| {
uart.print("main returned error: {t}\n", .{e});
};
@@ -76,5 +79,10 @@ export fn abort() noreturn {
}
fn main() !void {
+ const alloc = memory.get_allocator();
+ const pt = memory.get_page_table();
+
+ _ = alloc;
+ _ = pt;
uart.print("Hello World from VM!\n", .{});
}