commit 2257dee555b44129f375ce78d8b726414d521d8e
parent 6cffe08340b0c4926d993dd17346255ab6150f60
Author: Sylvia Ivory <git@sivory.net>
Date: Sun, 15 Mar 2026 12:29:27 -0700
Load Linux ELF
Diffstat:
6 files changed, 372 insertions(+), 119 deletions(-)
diff --git a/.gitignore b/.gitignore
@@ -2,4 +2,4 @@
/zig-out
/src/*.runner.zig
target
-/sylveos/lua
+/sylveos/hello
diff --git a/pi/pt.zig b/pi/pt.zig
@@ -18,7 +18,7 @@ pub fn init(alloc: std.mem.Allocator, count: u16) Error![]mmu.FirstLevelDescript
const page_table = try alloc.alignedAlloc(
mmu.FirstLevelDescriptor,
- std.mem.Alignment.fromByteUnits(1 << 14),
+ std.mem.Alignment.fromByteUnits(1 << 5),
count,
);
@@ -29,11 +29,14 @@ pub fn init(alloc: std.mem.Allocator, count: u16) Error![]mmu.FirstLevelDescript
}
pub fn dupe(alloc: std.mem.Allocator, pt: []mmu.FirstLevelDescriptor) Error![]mmu.FirstLevelDescriptor {
+ @import("devices/mini-uart.zig").print("requesting {Bi}\n", .{pt.len * @sizeOf(mmu.FirstLevelDescriptor)});
+
const page_table = try alloc.alignedAlloc(
mmu.FirstLevelDescriptor,
- std.mem.Alignment.fromByteUnits(1 << 14),
+ std.mem.Alignment.fromByteUnits(1 << 5),
pt.len,
);
+ @import("devices/mini-uart.zig").print("got pt @ 0x{X:0>8}\n", .{@intFromPtr(page_table.ptr)});
@memcpy(page_table, pt);
mmu.sync_pte();
diff --git a/sylveos/loader.zig b/sylveos/loader.zig
@@ -13,42 +13,53 @@ const pt = pi.pt;
pub const Program = struct {
pages: Pages,
- alloc: std.mem.Allocator,
+ pt_alloc: std.mem.Allocator,
+ heap_alloc: std.mem.Allocator,
entrypoint: u32,
stack_pointer: u32,
+ elf_header: std.elf.Header,
+ header_location: u32,
};
-pub fn init(alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, elf: []const u8) !Program {
+pub fn init(pt_alloc: std.mem.Allocator, heap_alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, elf: []const u8) !Program {
// TODO; proper allocator
// TODO; asid
// TODO; pid
// TODO; memory attributes
- var arena: std.heap.ArenaAllocator = .init(alloc);
- const arena_alloc = arena.allocator();
-
// Load into page table so we can work with VA addresses
- var pages = try Pages.fork(arena_alloc, root, 1);
+ var pages = try Pages.fork(pt_alloc, root, 1);
pages.switch_into(1);
defer pages.switch_out_of();
var elf_reader = std.Io.Reader.fixed(elf);
const header = try std.elf.Header.read(&elf_reader);
+ uart.print("phnum: {d}\n", .{header.phnum});
+ uart.print("elf len: {d}\n", .{elf.len});
+ uart.print("seek: {d}\n", .{elf_reader.seek});
+ uart.print("end: {d}\n", .{elf_reader.end});
+
var it: ProgramHeaderBufferIterator = .{
.elf_header = header,
- .buf = elf_reader.buffer[elf_reader.seek..elf_reader.end],
+ .buf = elf,
};
+ var header_location: ?u32 = null;
+
while (try it.next()) |program_header| {
if (program_header.p_type != std.elf.PT_LOAD) continue;
+
var va = std.mem.alignBackward(u32, @intCast(program_header.p_vaddr), memory.KB4);
const end = std.mem.alignForward(u32, @intCast(program_header.p_vaddr + program_header.p_memsz), memory.KB4);
while (va < end) : (va += memory.KB4) {
- const pa = try memory.request_4kb(arena_alloc);
+ uart.print("requesting 4kb: ", .{});
+ const pa = try memory.request_4kb(heap_alloc);
+ uart.print("VA(0x{X:0>8}) -> PA(0x{X:0>8})\n", .{ va, pa });
try pages.map_4kb(va, pa, .{});
+ uart.print(" mapped: VA(0x{X:0>8}) -> PA(0x{X:0>8})\n", .{ va, memory.translate(va) catch 0 });
// Zero page
const dst: [*]u8 = @ptrFromInt(va);
@@ -70,42 +81,205 @@ pub fn init(alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, elf: []c
const file_offset: u32 = @intCast(program_header.p_offset + offset_in_segment);
if (file_offset + copy_size <= elf.len) {
- const src = elf[file_offset..][0..copy_size];
- @memcpy(dst[offset_in_page..][0..copy_size], src);
+ const src = elf[file_offset .. file_offset + copy_size];
+
+ // Copying header
+ if (file_offset == 0) {
+ header_location = va + offset_in_page;
+ }
+
+ uart.print(" writing to 0x{X} from 0x{X} (0x{X})\n", .{ va + offset_in_page, file_offset, copy_size });
+ @memcpy(dst[offset_in_page .. offset_in_page + copy_size], src);
}
}
}
}
+ if (header_location == null) {
+ // TODO; store header in VM
+ }
+
// Configure stack
const stack_size = memory.KB64;
// End of heap (physical)
const stack_top = memory.Region.heap().end;
{
- const va = stack_top - stack_size;
- const pa = try memory.request_4kb(alloc);
+ const sp = stack_top - stack_size;
+ uart.print("requesting 64kb: ", .{});
+ const pa = try memory.request_64kb(heap_alloc);
+ uart.print("0x{X:0>8}\n", .{pa});
- try pages.map_64kb(va, pa, .{});
+ try pages.map_64kb(sp, pa, .{});
- const dst: [*]u8 = @ptrFromInt(va);
+ const dst: [*]u8 = @ptrFromInt(sp);
@memset(dst[0..memory.KB4], 0);
}
+ // kuser
+ {
+ uart.print("requesting 4kb: ", .{});
+ const pa = try memory.request_4kb(heap_alloc);
+ uart.print("0x{X:0>8}\n", .{pa});
+
+ // TODO; read only
+ try pages.map_4kb(0xFFFF_0000, pa, .{});
+ pi.mem.put_u32(@ptrFromInt(0xFFFF_0FFC), 2);
+ }
+
return .{
- .alloc = arena_alloc,
+ .heap_alloc = heap_alloc,
+ .pt_alloc = pt_alloc,
.entrypoint = @intCast(header.entry),
.pages = pages,
.stack_pointer = stack_top,
+ .elf_header = header,
+ .header_location = header_location orelse unreachable,
};
}
-pub fn execute(self: *Program, pid: u24, args: [][]u8, env: [][]u8) noreturn {
+pub fn execute(self: *Program, pid: u24, args: []const []const u8, env: []const []const u8) !noreturn {
// TODO; args, env
- _ = args;
- _ = env;
self.pages.switch_into(pid);
+ // Temporarily store env/arg pointers for future reference
+ var arena: std.heap.ArenaAllocator = .init(self.pt_alloc);
+ defer arena.deinit();
+
+ const alloc = arena.allocator();
+
+ var sp = self.stack_pointer;
+ // Setup what a Linux ELF expects
+ // https://articles.manugarg.com/aboutelfauxiliaryvectors
+ // 0. Null Marker
+ // 1. Environment Strings
+ // 2. Argument Strings
+ // 3. Padding
+ // 4. Aux
+ // 5. Environment
+ // 6. Arguments
+ // 7. Argc
+
+ uart.print("initial sp: {X}\n", .{sp});
+
+ sp = align_down(sp, 8);
+ uart.print("initial alignment: {X}\n", .{sp});
+
+ // https://xkcd.com/221/
+ const at_random_ptr = push_stack_bytes(sp, &[_]u8{4} ** 16);
+ sp = at_random_ptr;
+ uart.print("at_random ptr: {X}\n", .{sp});
+
+ const env_ptrs = try alloc.alloc(u32, env.len);
+ for (env, 0..) |e, idx| {
+ sp = push_cstring(sp, e);
+ env_ptrs[idx] = sp;
+ }
+ uart.print("env ptrs: {X}\n", .{sp});
+
+ const arg_ptrs = try alloc.alloc(u32, args.len);
+ for (args, 0..) |a, idx| {
+ sp = push_cstring(sp, a);
+ arg_ptrs[idx] = sp;
+ }
+ uart.print("arg ptrs: {X}\n", .{sp});
+
+ sp = align_down(sp, 8);
+
+ // AT_NULL
+ sp = push_u32(sp, 0);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_NULL));
+ uart.print("at_null sp: {X}\n", .{sp});
+
+ // AT_PHDR
+ sp = push_u32(sp, self.header_location);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_PHDR));
+ uart.print("at_phdr sp: {X}\n", .{sp});
+
+ // AT_PHENT
+ sp = push_u32(sp, self.elf_header.phentsize);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_PHENT));
+ uart.print("at_phent sp: {X}\n", .{sp});
+
+ // AT_PHNUM
+ sp = push_u32(sp, self.elf_header.phnum);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_PHNUM));
+ uart.print("at_phnum sp: {X}\n", .{sp});
+
+ // AT_PAGESZ
+ sp = push_u32(sp, memory.KB4);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_PAGESZ));
+ uart.print("at_pagesz sp: {X}\n", .{sp});
+
+ // AT_ENTRY
+ sp = push_u32(sp, self.entrypoint);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_ENTRY));
+ uart.print("at_entry sp: {X}\n", .{sp});
+
+ // AT_UID
+ // TODO; let user specify UID
+ sp = push_u32(sp, 0);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_UID));
+ uart.print("at_uid sp: {X}\n", .{sp});
+
+ // AT_EUID
+ // TODO; let user specify EUID
+ sp = push_u32(sp, 0);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_EUID));
+ uart.print("at_euid sp: {X}\n", .{sp});
+
+ // AT_GID
+ // TODO; let user specify GID
+ sp = push_u32(sp, 0);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_GID));
+ uart.print("at_gid sp: {X}\n", .{sp});
+
+ // AT_EGID
+ // TODO; let user specify EGID
+ sp = push_u32(sp, 0);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_EGID));
+ uart.print("at_egid sp: {X}\n", .{sp});
+
+ // AT_RANDOM
+ sp = push_u32(sp, at_random_ptr);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_RANDOM));
+ uart.print("at_random sp: {X}\n", .{sp});
+
+ // AT_EXECFN
+ if (args.len != 0) {
+ sp = push_u32(sp, arg_ptrs[0]);
+ sp = push_u32(sp, @intFromEnum(AuxTag.AT_EXECFN));
+ }
+ uart.print("at_execfn sp: {X}\n", .{sp});
+
+ // env_p
+ sp = push_u32(sp, 0);
+ {
+ var i = env_ptrs.len;
+ while (i > 0) : (i -= 1) sp = push_u32(sp, env_ptrs[i - 1]);
+ }
+ uart.print("envp sp: {X}\n", .{sp});
+
+ // arg_v
+ sp = push_u32(sp, 0);
+ {
+ var i = arg_ptrs.len;
+ while (i > 0) : (i -= 1) sp = push_u32(sp, arg_ptrs[i - 1]);
+ }
+ uart.print("argv sp: {X}\n", .{sp});
+
+ // arg_c
+ sp = push_u32(sp, args.len);
+ uart.print("argc sp: {X}\n", .{sp});
+
+ // sp = align_down(sp, 8);
+ // uart.print("final sp: {X}\n", .{sp});
+
+ var it = sp;
+ while (it < self.stack_pointer) : (it += 4) {
+ uart.print("0x{X:0>8}: {X}\n", .{ it, pi.mem.get_u32(@ptrFromInt(it)) });
+ }
+
var psr = pi.PSR.get_c();
psr.mode = .User;
psr.i = false;
@@ -113,11 +287,18 @@ pub fn execute(self: *Program, pid: u24, args: [][]u8, env: [][]u8) noreturn {
.gp = .{0} ** 13,
.lr = self.entrypoint,
.pc = self.entrypoint,
- .sp = self.stack_pointer,
+ .sp = sp,
.psr = psr,
};
- pi.switching.restore_state_user(®isters);
+ uart.print("switching into!\n", .{});
+ uart.print("VA(0x{X}) -> PA(0x{X})\n", .{ self.entrypoint, memory.translate(self.entrypoint) catch 0 });
+
+ for (0..16) |idx| {
+ uart.print("{X:0>8}\n", .{pi.mem.get_u32(@ptrFromInt(self.entrypoint + idx * 4))});
+ }
+
+ pi.switching.restore_state_privileged(®isters);
}
// std.elf fix
@@ -156,3 +337,59 @@ fn takePhdr(reader: *std.io.Reader, elf_header: std.elf.Header) !?std.elf.Elf64_
.p_align = phdr.p_align,
};
}
+
+fn align_down(sp: u32, alignment: u8) u32 {
+ return std.mem.alignBackward(u32, sp, alignment);
+}
+
+fn push_stack_bytes(sp: u32, bytes: []const u8) u32 {
+ var new_sp = sp;
+ new_sp -= bytes.len;
+ const dst: [*]u8 = @ptrFromInt(new_sp);
+ @memcpy(dst[0..bytes.len], bytes);
+ return new_sp;
+}
+
+fn push_u32(sp: u32, v: u32) u32 {
+ var buffer: [4]u8 = undefined;
+ std.mem.writeInt(u32, &buffer, v, .little);
+ return push_stack_bytes(sp, &buffer);
+}
+
+fn push_cstring(sp: u32, s: []const u8) u32 {
+ const null_byte: [1]u8 = .{0};
+ const new_sp = push_stack_bytes(sp, &null_byte);
+ return push_stack_bytes(new_sp, s);
+}
+
+// https://github.com/torvalds/linux/blob/master/include/uapi/linux/auxvec.h
+pub const AuxTag = enum(u32) {
+ AT_NULL = 0,
+ AT_IGNORE = 1,
+ AT_EXEC_FD = 2,
+ AT_PHDR = 3,
+ AT_PHENT = 4,
+ AT_PHNUM = 5,
+ AT_PAGESZ = 6,
+ AT_BASE = 7,
+ AT_FLAGS = 8,
+ AT_ENTRY = 9,
+ AT_NOTELF = 10,
+ AT_UID = 11,
+ AT_EUID = 12,
+ AT_GID = 13,
+ AT_EGID = 14,
+ AT_PLATFORM = 15,
+ AT_HWCAP = 16,
+ AT_CLKTCK = 17,
+ AT_SECURE = 23,
+ AT_BASE_PLATFORM = 24,
+ AT_RANDOM = 25,
+ AT_HWCAP2 = 26,
+ AT_RSEQ_FEATURE_SIZE = 27,
+ AT_RSEQ_ALIGN = 28,
+ AT_HWCAP3 = 29,
+ AT_HWCAP4 = 30,
+ AT_EXECFN = 31,
+ AT_MINSIGSTKSZ = 51,
+};
diff --git a/sylveos/memory.zig b/sylveos/memory.zig
@@ -49,13 +49,20 @@ pub const Region = struct {
return Region.init(program_start, program_end);
}
- pub fn page_table() Region {
+ pub fn os_page_table() Region {
const page_table_start = translate(@intFromPtr(&__page_table_start__)) catch 0;
const page_table_end = translate(@intFromPtr(&__page_table_end__)) catch 0;
return Region.init(page_table_start, page_table_end);
}
+ pub fn process_page_table() Region {
+ const page_table_start = os_page_table().start - MB;
+ const page_table_end = os_page_table().start;
+
+ return Region.init(page_table_start, page_table_end);
+ }
+
pub fn stack() Region {
const stack_start = @intFromPtr(&__stack_start__);
const stack_end = @intFromPtr(&__stack_end__);
@@ -90,7 +97,7 @@ pub const Region = struct {
pub fn heap() Region {
const heap_start = std.mem.alignBackward(u32, @intFromPtr(&__heap_start__), MB);
- const heap_end = std.mem.alignBackward(u32, memory().end, MB);
+ const heap_end = std.mem.alignBackward(u32, memory().end - process_page_table().size, MB);
return Region.init(heap_start, heap_end);
}
@@ -117,7 +124,7 @@ pub const Region = struct {
pub fn set_memory(size: u32) void {
MEMORY_START = 0;
- MEMORY_END = size - (program().end - page_table().start);
+ MEMORY_END = size - (program().end - os_page_table().start);
}
};
@@ -154,12 +161,14 @@ pub fn print_regions_virtual() void {
const int_stack = Region.interrupt_stack();
const heap = Region.heap();
const io = Region.io();
- const pt = Region.page_table();
+ const os_pt = Region.os_page_table();
+ const p_pt = Region.process_page_table();
uart.print(
\\ Virtual Memory Layout:
\\ HEAP: 0x{X:0>8} - 0x{X:0>8} ({Bi})
- \\ PAGE TABLE: 0x{X:0>8} - 0x{X:0>8} ({Bi})
+ \\ PROCESS PAGE TABLE: 0x{X:0>8} - 0x{X:0>8} ({Bi})
+ \\ OS ROOT PAGE TABLE: 0x{X:0>8} - 0x{X:0>8} ({Bi})
\\ BCM2835 IO: 0x{X:0>8} - 0x{X:0>8} ({Bi})
\\ KERNEL STACK: 0x{X:0>8} - 0x{X:0>8} ({Bi})
\\KERNEL INTERRUPT STACK: 0x{X:0>8} - 0x{X:0>8} ({Bi})
@@ -168,7 +177,8 @@ pub fn print_regions_virtual() void {
\\
, .{
heap.start, heap.end, heap.size,
- pt.start, pt.end, pt.size,
+ p_pt.start, p_pt.end, p_pt.size,
+ os_pt.start, os_pt.end, os_pt.size,
io.start, io.end, io.size,
stack.start, stack.end, stack.size,
int_stack.start, int_stack.end, int_stack.size,
@@ -176,16 +186,26 @@ pub fn print_regions_virtual() void {
});
}
-pub fn get_allocator() std.mem.Allocator {
+pub fn get_allocator() std.heap.FixedBufferAllocator {
const heap = Region.heap();
- var fba: std.heap.FixedBufferAllocator = .init(@ptrCast(heap.raw[1..heap.size]));
+ const fba: std.heap.FixedBufferAllocator = .init(@ptrCast(heap.raw[MB..heap.size]));
+ uart.print("Main alloc on 0x{X} (size: {Bi})\n", .{ @intFromPtr(fba.buffer.ptr), heap.size - MB });
+
+ return fba;
+}
+
+pub fn get_pt_allocator() std.heap.FixedBufferAllocator {
+ const p_pt = Region.process_page_table();
+
+ const fba: std.heap.FixedBufferAllocator = .init(@ptrCast(p_pt.raw[0..p_pt.size]));
+ uart.print("PT alloc on 0x{X} (size: {Bi})\n", .{ @intFromPtr(fba.buffer.ptr), p_pt.size });
- return fba.allocator();
+ return fba;
}
pub fn get_page_table() []mmu.FirstLevelDescriptor {
- var page_table_raw: [*]mmu.FirstLevelDescriptor = @ptrFromInt(Region.page_table().start);
+ var page_table_raw: [*]mmu.FirstLevelDescriptor = @ptrFromInt(Region.os_page_table().start);
return page_table_raw[0..4096];
}
@@ -199,15 +219,17 @@ pub fn translate(va: u32) !u32 {
return physical_base | offset;
}
+// TODO; these three cause... issues (can't read ptr)
+// see if a sentinel fixes it
pub fn request_1mb(alloc: std.mem.Allocator) !u32 {
- const slice = try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(MB), MB);
- return @intFromPtr(slice.ptr);
+ const page = alloc.rawAlloc(MB, std.mem.Alignment.fromByteUnits(MB), @returnAddress()) orelse return std.mem.Allocator.Error.OutOfMemory;
+ return @intFromPtr(page);
}
pub fn request_64kb(alloc: std.mem.Allocator) !u32 {
- const slice = try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(KB64), KB64);
- return @intFromPtr(slice.ptr);
+ const page = alloc.rawAlloc(KB64, std.mem.Alignment.fromByteUnits(KB64), @returnAddress()) orelse return std.mem.Allocator.Error.OutOfMemory;
+ return @intFromPtr(page);
}
pub fn request_4kb(alloc: std.mem.Allocator) !u32 {
- const slice = try alloc.alignedAlloc(u8, std.mem.Alignment.fromByteUnits(KB4), KB4);
- return @intFromPtr(slice.ptr);
+ const page = alloc.rawAlloc(KB4, std.mem.Alignment.fromByteUnits(KB4), @returnAddress()) orelse return std.mem.Allocator.Error.OutOfMemory;
+ return @intFromPtr(page);
}
diff --git a/sylveos/pages.zig b/sylveos/pages.zig
@@ -7,19 +7,9 @@ const mmu = pi.mmu;
const pt = pi.pt;
const SECOND_LEVEL_FAULT: mmu.SecondLevelDescriptor = .{ .ty = .Fault, .descriptor = .{ .fault = .{} } };
-const SectionType = enum {
- Large,
- Small,
-};
-const LargePageSection = struct {
- node: std.SinglyLinkedList.Node,
- ty: SectionType,
- descriptor: [16]mmu.SecondLevelDescriptor align(1 << 10),
-};
-const SmallPageSection = struct {
+const CoarseTable = struct {
node: std.SinglyLinkedList.Node,
- ty: SectionType,
- descriptor: [1024]mmu.SecondLevelDescriptor align(1 << 10),
+ descriptor: [256]mmu.SecondLevelDescriptor align(1 << 10),
};
alloc: std.mem.Allocator,
@@ -69,8 +59,25 @@ fn va_to_index(va: u32) usize {
return @intCast(va >> 20);
}
+fn get_descriptor(self: *Self, current: *mmu.FirstLevelDescriptor, domain: u4) !*[256]mmu.SecondLevelDescriptor {
+ if (current.ty == .Section) return Error.TossingSection;
+ if (current.ty == .Fault) {
+ var coarse_table = try self.alloc.create(CoarseTable);
+ self.pages.prepend(&coarse_table.node);
+ @memset(std.mem.asBytes(&coarse_table.descriptor), 0);
+
+ current.ty = .CoarsePageTable;
+ current.descriptor = .{ .coarse = .{
+ .domain = domain,
+ .coarse_base_address = @truncate(@intFromPtr(&coarse_table.*.descriptor) >> 10),
+ } };
+ }
+
+ return @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
+}
+
pub fn map_1mb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
- if (std.mem.isAligned(pa, memory.MB)) return Error.Unaligned;
+ if (!std.mem.isAligned(pa, memory.MB) or !std.mem.isAligned(va, memory.MB)) return Error.Unaligned;
// Whole section, yippee
// Just check we aren't chucking a coarse page
@@ -99,82 +106,42 @@ pub fn map_1mb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
}
// Assume user isn't stupid and tries to map 4kb then 64kb
pub fn map_64kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
- if (std.mem.isAligned(pa, memory.KB64)) return Error.Unaligned;
+ if (!std.mem.isAligned(pa, memory.KB64) or !std.mem.isAligned(va, memory.KB64)) return Error.Unaligned;
const idx = va_to_index(va);
- var current = &self.sectors[idx];
-
- if (current.ty == .Section) return Error.TossingSection;
+ const descriptor = try self.get_descriptor(&self.sectors[idx], attr.domain);
- if (current.ty == .Fault) {
- // Need to alloc LargePageSection
- var section = try self.alloc.create(LargePageSection);
- self.pages.prepend(§ion.node);
+ const base_entry = ((va >> 12) & 0xFF) & ~@as(usize, 0xF);
+ const mem_attr = attr.encoding.manual;
- current.ty = .CoarsePageTable;
- current.descriptor = .{ .coarse = .{
- .domain = attr.domain,
- .coarse_base_address = @truncate(@intFromPtr(§ion.descriptor)),
- } };
+ for (0..16) |i| {
+ descriptor[base_entry + i].ty = .LargePage;
+ descriptor[base_entry + i].descriptor.large_page = .{
+ .not_global = if (attr.scope == .Global) false else true,
+ .ap = attr.access_permission,
+ .apx = attr.access_permission_x,
+ .b = mem_attr.b,
+ .c = mem_attr.c,
+ .tex = mem_attr.tex,
+ .never_execute = attr.never_execute,
+ .shared = attr.shared,
+ .base_address = @truncate(pa >> 16),
+ };
}
- var descriptor: *[16]mmu.SecondLevelDescriptor align(1 << 10) = @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
- const sector: *LargePageSection = @alignCast(@fieldParentPtr("descriptor", descriptor));
- if (sector.ty != .Large) return Error.Map64To4;
-
- // Get lower 16 bits
- const page_idx: u4 = @truncate(va & 0xFFFF);
- const mem_attr = attr.encoding.manual;
-
- descriptor[page_idx].ty = .LargePage;
- descriptor[page_idx].descriptor.large_page = .{
- .not_global = if (attr.scope == .Global) false else true,
- .ap = attr.access_permission,
- .apx = attr.access_permission_x,
- .b = mem_attr.b,
- .c = mem_attr.c,
- .tex = mem_attr.tex,
- .never_execute = attr.never_execute,
- .shared = attr.shared,
- .base_address = @truncate(pa >> 16),
- };
// TODO; only do 1 line
mmu.sync_pte();
}
pub fn map_4kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
- if (std.mem.isAligned(pa, memory.KB4)) return Error.Unaligned;
+ if (!std.mem.isAligned(pa, memory.KB4) or !std.mem.isAligned(va, memory.KB4)) return Error.Unaligned;
const idx = va_to_index(va);
- var current = &self.sectors[idx];
+ const descriptor = try self.get_descriptor(&self.sectors[idx], attr.domain);
- if (current.ty == .Section) return Error.TossingSection;
-
- if (current.ty == .Fault) {
- // Need to alloc SmallPageSection
- var section = try self.alloc.create(SmallPageSection);
- self.pages.prepend(§ion.node);
-
- current.ty = .CoarsePageTable;
- current.descriptor = .{ .coarse = .{
- .domain = attr.domain,
- .coarse_base_address = @truncate(@intFromPtr(§ion.descriptor)),
- } };
- }
-
- var descriptor: *[1024]mmu.SecondLevelDescriptor align(1 << 10) = @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
- const sector: *SmallPageSection = @alignCast(@fieldParentPtr("descriptor", descriptor));
- if (sector.ty != .Small) return Error.Map4To64;
-
- // Get lower 16 bits
- const page_idx: u10 = @truncate(va & 0xFFFF);
+ const page_idx = (va >> 12) & 0xFF;
const mem_attr = attr.encoding.manual;
- if (attr.never_execute) {
- descriptor[page_idx].ty = .SmallPageNeverExecute;
- } else {
- descriptor[page_idx].ty = .SmallPageExecutable;
- }
-
+ descriptor[page_idx].ty = if (attr.never_execute) .SmallPageNeverExecute else .SmallPageExecutable;
descriptor[page_idx].descriptor.small_page = .{
.not_global = if (attr.scope == .Global) false else true,
.ap = attr.access_permission,
@@ -183,7 +150,7 @@ pub fn map_4kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
.c = mem_attr.c,
.tex = mem_attr.tex,
.shared = attr.shared,
- .base_address = @truncate(pa >> 16),
+ .base_address = @truncate(pa >> 12),
};
// TODO; only do 1 line
mmu.sync_pte();
diff --git a/sylveos/root.zig b/sylveos/root.zig
@@ -1,8 +1,9 @@
+const std = @import("std");
const pi = @import("pi");
const memory = @import("./memory.zig");
const loader = @import("./loader.zig");
-const lua_binary = @embedFile("./lua");
+const hello_binary = @embedFile("./hello");
const uart = pi.devices.mini_uart;
const interrupts = pi.interrupts;
@@ -33,6 +34,19 @@ pub export fn _start() linksection(".kmain") callconv(.naked) noreturn {
);
}
+fn panic_handler(msg: []const u8, trace_addr: ?usize) noreturn {
+ @branchHint(.cold);
+
+ if (uart.is_initialized()) {
+ uart.print("kernel panic: {s} @ 0x{X}\n", .{ msg, trace_addr orelse 0 });
+ uart.flush();
+ }
+
+ abort();
+}
+
+pub const panic = std.debug.FullPanic(panic_handler);
+
// figure out MMU issues
fn abort_handler(regs: interrupts.Registers, _: interrupts.ExceptionVector) void {
const far = pi.faults.FAR.get();
@@ -54,6 +68,8 @@ export fn kmain(memory_size: u32) void {
pi.pt.remove(pt, 0x0010_0000) catch {};
pi.pt.remove(pt, 0x0020_0000) catch {};
+ Region.process_page_table().map_identity(pt, pi.procmap.READ_WRITE_ATTR) catch {};
+
// Keep interrupt stack on top of normal stack
const base_address = Region.program().start;
@@ -84,14 +100,22 @@ export fn abort() noreturn {
fn main() !void {
uart.print("Hello World from VM!\n", .{});
- const alloc = memory.get_allocator();
+ var heap_fba = memory.get_allocator();
+ const heap_alloc = heap_fba.allocator();
+
+ var pt_fba = memory.get_pt_allocator();
+ const pt_alloc = pt_fba.allocator();
+
const pt = memory.get_page_table();
- var lua = try loader.init(alloc, pt, lua_binary);
+ // Just create a 1mb allocation we'll identity map
+ uart.print("page_table @ 0x{X}\n", .{@intFromPtr(pt.ptr)});
+
+ var empty = try loader.init(pt_alloc, heap_alloc, pt, hello_binary);
uart.print(
\\Stack Pointer: {X:0>8}
\\ Entrypoint: {X:0>8}
\\
- , .{ lua.stack_pointer, lua.entrypoint });
- loader.execute(&lua, 0, &[_][]u8{}, &[_][]u8{});
+ , .{ empty.stack_pointer, empty.entrypoint });
+ try loader.execute(&empty, 0, &[_][]const u8{"hello"}, &[_][]u8{});
}