sylveos

Toy Operating System
Log | Files | Refs

commit 6cffe08340b0c4926d993dd17346255ab6150f60
parent c3b423b4dc515a9bc96aac2a779ab289897e0d20
Author: Sylvia Ivory <git@sivory.net>
Date:   Sat, 14 Mar 2026 18:04:57 -0700

Fix compile errors

Diffstat:
 M .gitignore         |  1 +
 M pi/mmu.zig         |  2 +-
 M sylveos/loader.zig | 61 +++++++++++++++++++++++++++++++++++++++++++++++++++----------
 M sylveos/pages.zig  | 10 +++++-----
 M sylveos/root.zig   | 15 ++++++++++++---
5 files changed, 70 insertions(+), 19 deletions(-)

diff --git a/.gitignore b/.gitignore
@@ -2,3 +2,4 @@
 /zig-out
 /src/*.runner.zig
 target
+/sylveos/lua
diff --git a/pi/mmu.zig b/pi/mmu.zig
@@ -416,7 +416,7 @@ pub const FirstLevelDescriptor = packed struct(u32) {
 pub const SecondLevelDescriptor = packed struct(u32) {
     pub const Fault = packed struct(u30) {
-        _ignore_31_2: u32 = 0,
+        _ignore_31_2: u30 = 0,
     };
 
     pub const LargePage = packed struct(u30) {
         b: u1,
diff --git a/sylveos/loader.zig b/sylveos/loader.zig
@@ -24,7 +24,7 @@ pub fn init(alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, elf: []c
     // TODO; pid
     // TODO; memory attributes
 
-    const arena: std.heap.ArenaAllocator = .init(alloc);
+    var arena: std.heap.ArenaAllocator = .init(alloc);
     const arena_alloc = arena.allocator();
 
     // Load into page table so we can work with VA addresses
@@ -35,11 +35,15 @@
     var elf_reader = std.Io.Reader.fixed(elf);
     const header = try std.elf.Header.read(&elf_reader);
 
-    var it = header.iterateProgramHeadersBuffer(elf_reader.buffer[elf_reader.seek..elf_reader.end]);
+    var it: ProgramHeaderBufferIterator = .{
+        .elf_header = header,
+        .buf = elf_reader.buffer[elf_reader.seek..elf_reader.end],
+    };
+
     while (try it.next()) |program_header| {
         if (program_header.p_type != std.elf.PT_LOAD) continue;
-        var va = std.mem.alignBackward(u32, program_header.p_vaddr, memory.KB4);
-        const end = std.mem.alignForward(u32, program_header.p_vaddr + program_header.p_memsz, memory.KB4);
+        var va = std.mem.alignBackward(u32, @intCast(program_header.p_vaddr), memory.KB4);
+        const end = std.mem.alignForward(u32, @intCast(program_header.p_vaddr + program_header.p_memsz), memory.KB4);
 
         while (va < end) : (va += memory.KB4) {
             const pa = try memory.request_4kb(arena_alloc);
@@ -53,8 +57,8 @@
             // Copy
             const page_start = va;
             const page_end = va + memory.KB4;
-            const segment_start = program_header.p_vaddr;
-            const segment_end = program_header.p_vaddr + program_header.p_filesz;
+            const segment_start: u32 = @intCast(program_header.p_vaddr);
+            const segment_end: u32 = @intCast(program_header.p_vaddr + program_header.p_filesz);
             const copy_start = @max(page_start, segment_start);
             const copy_end = @min(page_end, segment_end);
@@ -63,7 +67,7 @@
             const offset_in_page = copy_start - page_start;
             const offset_in_segment = copy_start - segment_start;
             const copy_size = copy_end - copy_start;
-            const file_offset = program_header.p_offset + offset_in_segment;
+            const file_offset: u32 = @intCast(program_header.p_offset + offset_in_segment);
 
             if (file_offset + copy_size <= elf.len) {
                 const src = elf[file_offset..][0..copy_size];
@@ -90,7 +94,7 @@
     return .{
         .alloc = arena_alloc,
-        .entrypoint = header.entry,
+        .entrypoint = @intCast(header.entry),
         .pages = pages,
         .stack_pointer = stack_top,
     };
@@ -110,8 +114,45 @@ pub fn execute(self: *Program, pid: u24, args: [][]u8, env: [][]u8) noreturn {
         .lr = self.entrypoint,
         .pc = self.entrypoint,
         .sp = self.stack_pointer,
-        .psr = .psr,
+        .psr = psr,
     };
-    pi.switching.restore_state_user(registers);
+    pi.switching.restore_state_user(&registers);
+}
+
+// std.elf fix
+pub const ProgramHeaderBufferIterator = struct {
+    elf_header: std.elf.Header,
+    buf: []const u8,
+    index: usize = 0,
+
+    pub fn next(it: *ProgramHeaderBufferIterator) !?std.elf.Elf64_Phdr {
+        if (it.index >= it.elf_header.phnum) return null;
+        defer it.index += 1;
+
+        const size: u64 = if (it.elf_header.is_64) @sizeOf(std.elf.Elf64_Phdr) else @sizeOf(std.elf.Elf32_Phdr);
+        const offset = it.elf_header.phoff + size * it.index;
+        var reader = std.Io.Reader.fixed(it.buf[@intCast(offset)..]);
+
+        return takePhdr(&reader, it.elf_header);
+    }
+};
+
+fn takePhdr(reader: *std.io.Reader, elf_header: std.elf.Header) !?std.elf.Elf64_Phdr {
+    if (elf_header.is_64) {
+        const phdr = try reader.takeStruct(std.elf.Elf64_Phdr, elf_header.endian);
+        return phdr;
+    }
+
+    const phdr = try reader.takeStruct(std.elf.Elf32_Phdr, elf_header.endian);
+    return .{
+        .p_type = phdr.p_type,
+        .p_offset = phdr.p_offset,
+        .p_vaddr = phdr.p_vaddr,
+        .p_paddr = phdr.p_paddr,
+        .p_filesz = phdr.p_filesz,
+        .p_memsz = phdr.p_memsz,
+        .p_flags = phdr.p_flags,
+        .p_align = phdr.p_align,
+    };
 }
diff --git a/sylveos/pages.zig b/sylveos/pages.zig
@@ -57,7 +57,7 @@ const Self = @This();
 
 pub fn fork(alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, asid: u8) !Self {
     return .{
-        .sectors = pt.dupe(alloc, root),
+        .sectors = try pt.dupe(alloc, root),
         .pages = .{},
         .alloc = alloc,
         .root = root,
@@ -119,7 +119,7 @@ pub fn map_64kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
     }
 
     var descriptor: *[16]mmu.SecondLevelDescriptor align(1 << 10) = @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
-    const sector: *LargePageSection = @fieldParentPtr("descriptor", descriptor);
+    const sector: *LargePageSection = @alignCast(@fieldParentPtr("descriptor", descriptor));
     if (sector.ty != .Large) return Error.Map64To4;
 
     // Get lower 16 bits
@@ -162,7 +162,7 @@ pub fn map_4kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
     }
 
     var descriptor: *[1024]mmu.SecondLevelDescriptor align(1 << 10) = @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
-    const sector: *SmallPageSection = @fieldParentPtr("descriptor", descriptor);
+    const sector: *SmallPageSection = @alignCast(@fieldParentPtr("descriptor", descriptor));
     if (sector.ty != .Small) return Error.Map4To64;
 
     // Get lower 16 bits
@@ -189,7 +189,7 @@ pub fn map_4kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
     mmu.sync_pte();
 }
 
-pub fn switch_into(self: *Self, pid: u24) !void {
+pub fn switch_into(self: *Self, pid: u24) void {
     self.old_context = .get();
     mmu.set_context_ttbr0(.{ .asid = self.asid, .pid = pid }, .{
@@ -197,7 +197,7 @@
     });
 }
 
-pub fn switch_out_of(self: *const Self) !void {
+pub fn switch_out_of(self: *const Self) void {
     mmu.set_context_ttbr0(self.old_context, .{
         .translation_table_base = @truncate(@intFromPtr(self.root.ptr) >> 5),
     });
diff --git a/sylveos/root.zig b/sylveos/root.zig
@@ -1,6 +1,9 @@
 const pi = @import("pi");
 const memory = @import("./memory.zig");
+const loader = @import("./loader.zig");
+const lua_binary = @embedFile("./lua");
+
 const uart = pi.devices.mini_uart;
 const interrupts = pi.interrupts;
 const Region = memory.Region;
@@ -79,10 +82,16 @@ export fn abort() noreturn {
 }
 
 fn main() !void {
+    uart.print("Hello World from VM!\n", .{});
+
     const alloc = memory.get_allocator();
     const pt = memory.get_page_table();
-    _ = alloc;
-    _ = pt;
-    uart.print("Hello World from VM!\n", .{});
+    var lua = try loader.init(alloc, pt, lua_binary);
+    uart.print(
+        \\Stack Pointer: {X:0>8}
+        \\ Entrypoint: {X:0>8}
+        \\
+    , .{ lua.stack_pointer, lua.entrypoint });
+    loader.execute(&lua, 0, &[_][]u8{}, &[_][]u8{});
 }