commit c3b423b4dc515a9bc96aac2a779ab289897e0d20
parent 0114e36802535d7acfd9e7f98e1810defe4d6d5b
Author: Sylvia Ivory <git@sivory.net>
Date: Sat, 14 Mar 2026 17:51:11 -0700
Add rough elf loader
Diffstat:
3 files changed, 127 insertions(+), 14 deletions(-)
diff --git a/sylveos/loader.zig b/sylveos/loader.zig
@@ -2,6 +2,8 @@ const std = @import("std");
const pi = @import("pi");
const process = @import("./process.zig");
+const memory = @import("./memory.zig");
+const Pages = @import("./pages.zig");
const Page = process.Page;
const mailbox = pi.devices.mailbox;
@@ -9,15 +11,107 @@ const uart = pi.devices.mini_uart;
const mmu = pi.mmu;
const pt = pi.pt;
-// Kernel: 0x20FF_FFFF -> 0xFFFF_FFFF
-// IO : 0x2000_0000 -> 0x20FF_FFFF
-// User : 0x0000_0000 -> 0x2000_0000 (512mb, fine enough)
-
-// Program contains a static series of pages (unfortunately 1mb each)
pub const Program = struct {
+    // Owned address-space mapping for this process (see pages.zig).
- pages: std.ArrayList(Page),
- pt: []mmu.FirstLevelDescriptor,
+ pages: Pages,
+    // Allocator backing `pages` and the process's physical pages.
+    // NOTE(review): init() stores an arena allocator here whose state lives
+    // on init()'s stack frame — confirm its lifetime (see init()).
+ alloc: std.mem.Allocator,
+    // Virtual address of the first instruction (ELF e_entry).
 entrypoint: u32,
+    // Initial user stack pointer (top of the mapped stack region).
+ stack_pointer: u32,
};
-// We map kernel into high addresses
+/// Build a user-space Program image from an in-memory ELF buffer.
+/// Maps each PT_LOAD segment page-by-page into a forked address space,
+/// copies the file-backed portion, zero-fills the rest (.bss), and maps a
+/// 64kb stack at the top of the heap region. Returns a Program whose pages
+/// and physical memory are owned by a heap-allocated arena.
+pub fn init(alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, elf: []const u8) !Program {
+    // TODO: proper allocator
+    // TODO: asid
+    // TODO: pid
+    // TODO: memory attributes
+
+    // Heap-allocate the arena: `.allocator()` needs a *ArenaAllocator, and
+    // the interface we store in the returned Program must not point at this
+    // function's (soon dead) stack frame.
+    const arena = try alloc.create(std.heap.ArenaAllocator);
+    arena.* = .init(alloc);
+    errdefer {
+        arena.deinit();
+        alloc.destroy(arena);
+    }
+    const arena_alloc = arena.allocator();
+
+    // Load into page table so we can work with VA addresses
+    var pages = try Pages.fork(arena_alloc, root, 1);
+    pages.switch_into(1);
+    defer pages.switch_out_of();
+
+    var elf_reader = std.Io.Reader.fixed(elf);
+    const header = try std.elf.Header.read(&elf_reader);
+
+    var it = header.iterateProgramHeadersBuffer(elf_reader.buffer[elf_reader.seek..elf_reader.end]);
+    while (try it.next()) |program_header| {
+        if (program_header.p_type != std.elf.PT_LOAD) continue;
+
+        // ELF64 header fields are u64; this is a 32-bit address space, so
+        // narrow explicitly before doing u32 arithmetic.
+        const seg_vaddr: u32 = @intCast(program_header.p_vaddr);
+        const seg_memsz: u32 = @intCast(program_header.p_memsz);
+        const seg_filesz: u32 = @intCast(program_header.p_filesz);
+        const seg_offset: u32 = @intCast(program_header.p_offset);
+
+        var va = std.mem.alignBackward(u32, seg_vaddr, memory.KB4);
+        const end = std.mem.alignForward(u32, seg_vaddr + seg_memsz, memory.KB4);
+
+        while (va < end) : (va += memory.KB4) {
+            const pa = try memory.request_4kb(arena_alloc);
+
+            try pages.map_4kb(va, pa, .{});
+
+            // Zero the whole page first; anything past p_filesz stays zero
+            // (this is what gives .bss its zeroed contents).
+            const dst: [*]u8 = @ptrFromInt(va);
+            @memset(dst[0..memory.KB4], 0);
+
+            // Copy the portion of the file image that overlaps this page.
+            const page_start = va;
+            const page_end = va + memory.KB4;
+            const segment_start = seg_vaddr;
+            const segment_end = seg_vaddr + seg_filesz;
+
+            const copy_start = @max(page_start, segment_start);
+            const copy_end = @min(page_end, segment_end);
+
+            if (copy_start < copy_end) {
+                const offset_in_page = copy_start - page_start;
+                const offset_in_segment = copy_start - segment_start;
+                const copy_size = copy_end - copy_start;
+                const file_offset = seg_offset + offset_in_segment;
+
+                // A segment extending past the buffer means a truncated or
+                // malformed image — fail loudly instead of silently skipping.
+                if (file_offset + copy_size > elf.len) return error.TruncatedElf;
+
+                const src = elf[file_offset..][0..copy_size];
+                @memcpy(dst[offset_in_page..][0..copy_size], src);
+            }
+        }
+    }
+
+    // Configure stack
+    const stack_size = memory.KB64;
+    // End of heap (physical)
+    const stack_top = memory.Region.heap().end;
+
+    {
+        const va = stack_top - stack_size;
+        // The mapping is 64kb, so back it with a 64kb allocation (a 4kb page
+        // here would map 60kb of memory we do not own) — and take it from the
+        // arena so it lives exactly as long as the Program.
+        const pa = try memory.request_64kb(arena_alloc);
+
+        try pages.map_64kb(va, pa, .{});
+
+        // Zero the entire stack, not just its first 4kb.
+        const dst: [*]u8 = @ptrFromInt(va);
+        @memset(dst[0..stack_size], 0);
+    }
+
+    return .{
+        .alloc = arena_alloc,
+        // e_entry is u64 in the generic header; narrow for our 32-bit VA.
+        .entrypoint = @intCast(header.entry),
+        .pages = pages,
+        .stack_pointer = stack_top,
+    };
+}
+
+/// Switch into this Program's address space and jump to its entrypoint in
+/// user mode. Does not return.
+pub fn execute(self: *Program, pid: u24, args: [][]u8, env: [][]u8) noreturn {
+    // TODO: args, env
+    _ = args;
+    _ = env;
+    self.pages.switch_into(pid);
+
+    // Drop to user mode with IRQs unmasked.
+    var psr = pi.PSR.get_c();
+    psr.mode = .User;
+    psr.i = false;
+    const registers: pi.interrupts.Registers = .{
+        .gp = .{0} ** 13,
+        .lr = self.entrypoint,
+        .pc = self.entrypoint,
+        .sp = self.stack_pointer,
+        // Was `.psr = .psr` (an enum literal), which discarded the mode and
+        // interrupt state computed above.
+        .psr = psr,
+    };
+
+    pi.switching.restore_state_user(registers);
+}
diff --git a/sylveos/memory.zig b/sylveos/memory.zig
@@ -199,12 +199,15 @@ pub fn translate(va: u32) !u32 {
return physical_base | offset;
}
-pub fn request_1mb(alloc: std.mem.Allocator) ![]u8 {
- return try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(MB), MB);
+/// Allocate one naturally-aligned 1mb page and return its physical base
+/// address. The backing slice is intentionally dropped, so the page can only
+/// be reclaimed by tearing down the whole allocator (e.g. an arena).
+/// NOTE(review): @intFromPtr yields usize — the implicit u32 result assumes
+/// a 32-bit target; confirm.
+pub fn request_1mb(alloc: std.mem.Allocator) !u32 {
+ const slice = try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(MB), MB);
+ return @intFromPtr(slice.ptr);
}
-pub fn request_64kb(alloc: std.mem.Allocator) ![]u8 {
- return try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(KB64), KB64);
+/// Allocate one naturally-aligned 64kb page; same ownership caveats as
+/// request_1mb (address only, freed with the allocator).
+pub fn request_64kb(alloc: std.mem.Allocator) !u32 {
+ const slice = try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(KB64), KB64);
+ return @intFromPtr(slice.ptr);
}
-pub fn request_4kb(alloc: std.mem.Allocator) ![]u8 {
- return try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(KB4), KB4);
+/// Allocate one naturally-aligned 4kb page; same ownership caveats as
+/// request_1mb (address only, freed with the allocator).
+pub fn request_4kb(alloc: std.mem.Allocator) !u32 {
+ const slice = try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(KB4), KB4);
+ return @intFromPtr(slice.ptr);
}
diff --git a/sylveos/pages.zig b/sylveos/pages.zig
@@ -29,6 +29,8 @@ sectors: []mmu.FirstLevelDescriptor,
pages: std.SinglyLinkedList,
asid: u8,
+old_context: mmu.ContextId = .{},
+
pub const Error = error{
InvalidSection,
InvalidLargePage,
@@ -186,3 +188,17 @@ pub fn map_4kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
// TODO; only do 1 line
mmu.sync_pte();
}
+
+/// Make this address space current on TTBR0, remembering the previous
+/// context so switch_out_of() can restore it. The body cannot fail, so this
+/// returns plain `void` — the call sites in loader.zig (including one in a
+/// `defer` pairing) invoke it without `try`, which requires this signature.
+pub fn switch_into(self: *Self, pid: u24) void {
+    self.old_context = .get();
+
+    mmu.set_context_ttbr0(.{ .asid = self.asid, .pid = pid }, .{
+        // NOTE(review): `>> 5` assumes the base field starts at bit 5 of
+        // TTBR0 — confirm against the descriptor layout in mmu.zig.
+        .translation_table_base = @truncate(@intFromPtr(self.sectors.ptr) >> 5),
+    });
+}
+
+/// Restore the context saved by the matching switch_into(). Infallible, so
+/// it returns `void` — necessary because loader.zig calls it in a `defer`,
+/// where an error union could not be handled.
+pub fn switch_out_of(self: *const Self) void {
+    mmu.set_context_ttbr0(self.old_context, .{
+        // NOTE(review): `self.root` is not among the fields visible in this
+        // diff (sectors/pages/asid/old_context) — confirm it exists; also
+        // confirm restoring old_context's id with the root table (rather
+        // than old_context's own table) is intended.
+        .translation_table_base = @truncate(@intFromPtr(self.root.ptr) >> 5),
+    });
+}