sylveos

Toy Operating System
Log | Files | Refs

memory.zig (7934B)


      1 const std = @import("std");
      2 const pi = @import("pi");
      3 
      4 const mailbox = pi.devices.mailbox;
      5 const uart = pi.devices.mini_uart;
      6 const pinned = pi.pinned;
      7 const mmu = pi.mmu;
      8 
      9 pub const MB: u32 = 1024 * 1024;
     10 pub const KB64: u32 = 1024 * 32;
     11 pub const KB4: u32 = 1024 * 4;
     12 
// Section boundary symbols provided by the linker script. Only their
// *addresses* are meaningful (always taken via `&` below); never read
// these as u32 values.
extern const __program_start__: u32;
extern const __program_end__: u32;
extern const __page_table_start__: u32;
extern const __page_table_end__: u32;
extern const __stack_start__: u32;
extern const __stack_end__: u32;
extern const __int_stack_start__: u32;
extern const __int_stack_end__: u32;
extern const __heap_start__: u32;
     22 
/// A half-open address range [start, end) with a raw byte-slice view over it.
/// The well-known regions of the system (kernel image, stacks, page tables,
/// heap, peripheral IO window) are exposed as named constructors, and the
/// struct offers helpers to map/unmap a region in an ARM first-level page
/// table at 1 MiB section granularity.
pub const Region = struct {
    // ARM memory bounds, cached after the first `memory()` call (queried
    // from the VideoCore mailbox) or overridden via `set_memory()`.
    var MEMORY_START: ?u32 = null;
    var MEMORY_END: ?u32 = null;

    start: u32, // inclusive start address
    end: u32, // exclusive end address
    size: u32, // end - start, in bytes
    raw: []allowzero u8, // byte view over the region; allowzero because start may legitimately be 0

    /// Build a Region over [start, end). `end` must be >= start (the
    /// subtraction would otherwise trap in safe build modes).
    pub fn init(start: u32, end: u32) Region {
        const size = end - start;
        var raw: [*]allowzero u8 = @ptrFromInt(start);

        return .{
            .start = start,
            .end = end,
            .size = size,
            .raw = raw[0..size],
        };
    }

    /// Kernel image, widened outward to whole 1 MiB sections
    /// (start rounded down, end rounded up).
    pub fn program() Region {
        // Program code is mapped at 1 MiB section granularity.
        const program_start = std.mem.alignBackward(u32, @intFromPtr(&__program_start__), MB);
        const program_end = std.mem.alignForward(u32, @intFromPtr(&__program_end__), MB);

        return Region.init(program_start, program_end);
    }

    /// OS root page table, as a *physical* region: the linker symbols are
    /// virtual addresses, so they are pushed through the MMU first.
    /// NOTE(review): `catch 0` silently collapses a failed translation to
    /// address 0 — a bad region rather than an error; confirm this is the
    /// intended pre-MMU fallback.
    pub fn os_page_table() Region {
        const page_table_start = translate(@intFromPtr(&__page_table_start__)) catch 0;
        const page_table_end = translate(@intFromPtr(&__page_table_end__)) catch 0;

        return Region.init(page_table_start, page_table_end);
    }

    /// The 1 MiB immediately below the OS root page table, reserved for
    /// per-process page tables (see `get_pt_allocator`).
    pub fn process_page_table() Region {
        const page_table_start = os_page_table().start - MB;
        const page_table_end = os_page_table().start;

        return Region.init(page_table_start, page_table_end);
    }

    /// Kernel stack, exactly as laid out by the linker script (no alignment).
    pub fn stack() Region {
        const stack_start = @intFromPtr(&__stack_start__);
        const stack_end = @intFromPtr(&__stack_end__);

        return Region.init(stack_start, stack_end);
    }

    /// Interrupt-mode stack, exactly as laid out by the linker script.
    pub fn interrupt_stack() Region {
        const int_stack_start = @intFromPtr(&__int_stack_start__);
        const int_stack_end = @intFromPtr(&__int_stack_end__);

        return Region.init(int_stack_start, int_stack_end);
    }

    /// BCM2835 peripheral MMIO window (0x2000_0000 .. 0x20FF_FFFF),
    /// widened to whole 1 MiB sections.
    pub fn io() Region {
        const io_start = std.mem.alignBackward(u32, 0x2000_0000, MB);
        const io_end = std.mem.alignForward(u32, 0x20FF_FFFF, MB);

        return Region.init(io_start, io_end);
    }

    /// ARM-visible RAM. Lazily queried from the VideoCore mailbox on first
    /// use, then cached in MEMORY_START/MEMORY_END.
    /// NOTE(review): `catch unreachable` means a mailbox failure is UB in
    /// release builds — acceptable only if the mailbox cannot fail here.
    pub fn memory() Region {
        if (MEMORY_START == null) {
            const memory_info = mailbox.get_arm_memory_info() catch unreachable;

            MEMORY_START = memory_info.base_address;
            MEMORY_END = memory_info.base_address + memory_info.memory_size;
        }

        return Region.init(MEMORY_START orelse unreachable, MEMORY_END orelse unreachable);
    }

    /// Kernel heap: from `__heap_start__` up to the top of RAM, minus the
    /// space reserved for the process page tables.
    /// NOTE(review): `alignBackward` on `__heap_start__` rounds *down*, which
    /// overlaps the preceding region unless the linker script already MB-aligns
    /// the symbol — confirm against the linker script.
    pub fn heap() Region {
        const heap_start = std.mem.alignBackward(u32, @intFromPtr(&__heap_start__), MB);
        const heap_end = std.mem.alignBackward(u32, memory().end - process_page_table().size, MB);

        return Region.init(heap_start, heap_end);
    }

    /// Remove every 1 MiB section of this region from page table `pt`.
    /// Assumes start/end are MB-aligned (integer division drops remainders).
    pub fn unmap(self: *const Region, pt: []mmu.FirstLevelDescriptor) !void {
        const size = (self.end / MB) - (self.start / MB);

        for (0..size) |offset| {
            try pi.pt.remove(pt, self.start + offset * MB);
        }
    }

    /// Map this region into page table `pt` at virtual base `to`, one 1 MiB
    /// section at a time, with the given pinned attributes.
    pub fn map_to(self: *const Region, pt: []mmu.FirstLevelDescriptor, to: u32, attr: pinned.PinnedAttribute) !void {
        const size = (self.end / MB) - (self.start / MB);

        for (0..size) |offset| {
            _ = try pi.pt.set(pt, to + offset * MB, self.start + offset * MB, attr);
        }
    }

    /// Identity-map this region (virtual == physical) into `pt`.
    pub fn map_identity(self: *const Region, pt: []mmu.FirstLevelDescriptor, attr: pinned.PinnedAttribute) !void {
        try self.map_to(pt, self.start, attr);
    }

    /// Override the cached memory bounds instead of asking the mailbox.
    /// NOTE(review): the end is `size` minus the gap between the program end
    /// and the OS page table start — presumably to carve out the reserved
    /// image/page-table area; verify against the caller that supplies `size`.
    pub fn set_memory(size: u32) void {
        MEMORY_START = 0;
        MEMORY_END = size - (program().end - os_page_table().start);
    }
};
    130 
    131 pub fn print_regions_physical() void {
    132     const memory = Region.memory();
    133     const program = Region.program();
    134     const stack = Region.stack();
    135     const int_stack = Region.interrupt_stack();
    136     const heap = Region.heap();
    137     const io = Region.io();
    138 
    139     uart.print(
    140         \\Physical Memory Layout: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    141         \\                KERNEL: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    142         \\          KERNEL STACK: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    143         \\KERNEL INTERRUPT STACK: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    144         \\                  HEAP: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    145         \\            BCM2835 IO: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    146         \\
    147         \\
    148     , .{
    149         memory.start,    memory.end,    memory.size,
    150         program.start,   program.end,   program.size,
    151         stack.start,     stack.end,     stack.size,
    152         int_stack.start, int_stack.end, int_stack.size,
    153         heap.start,      heap.end,      heap.size,
    154         io.start,        io.end,        io.size,
    155     });
    156 }
    157 
    158 pub fn print_regions_virtual() void {
    159     const program = Region.program();
    160     const stack = Region.stack();
    161     const int_stack = Region.interrupt_stack();
    162     const heap = Region.heap();
    163     const io = Region.io();
    164     const os_pt = Region.os_page_table();
    165     const p_pt = Region.process_page_table();
    166 
    167     uart.print(
    168         \\ Virtual Memory Layout:
    169         \\                  HEAP: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    170         \\    PROCESS PAGE TABLE: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    171         \\    OS ROOT PAGE TABLE: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    172         \\            BCM2835 IO: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    173         \\          KERNEL STACK: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    174         \\KERNEL INTERRUPT STACK: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    175         \\                KERNEL: 0x{X:0>8} - 0x{X:0>8} ({Bi})
    176         \\
    177         \\
    178     , .{
    179         heap.start,      heap.end,      heap.size,
    180         p_pt.start,      p_pt.end,      p_pt.size,
    181         os_pt.start,     os_pt.end,     os_pt.size,
    182         io.start,        io.end,        io.size,
    183         stack.start,     stack.end,     stack.size,
    184         int_stack.start, int_stack.end, int_stack.size,
    185         program.start,   program.end,   program.size,
    186     });
    187 }
    188 
    189 pub fn get_allocator() std.heap.FixedBufferAllocator {
    190     const heap = Region.heap();
    191 
    192     const fba: std.heap.FixedBufferAllocator = .init(@ptrCast(heap.raw[MB..heap.size]));
    193 
    194     return fba;
    195 }
    196 
    197 pub fn get_pt_allocator() std.heap.FixedBufferAllocator {
    198     const p_pt = Region.process_page_table();
    199 
    200     const fba: std.heap.FixedBufferAllocator = .init(@ptrCast(p_pt.raw[0..p_pt.size]));
    201 
    202     return fba;
    203 }
    204 
    205 pub fn get_page_table() []mmu.FirstLevelDescriptor {
    206     var page_table_raw: [*]mmu.FirstLevelDescriptor = @ptrFromInt(Region.os_page_table().start);
    207     return page_table_raw[0..4096];
    208 }
    209 
    210 pub fn translate(va: u32) !u32 {
    211     const res = mmu.va_translation_cw(va, .PrivilegedRead);
    212     if (res.aborted) return error.FailedTranslation;
    213 
    214     const physical_base = @as(u32, res.inner.success.address) << 10;
    215     const offset = va & 0xFFF;
    216 
    217     return physical_base | offset;
    218 }
    219 
    220 // TODO; these two cause... issues (can't read ptr)
    221 //       see if sentential fixes it
    222 pub fn request_1mb(alloc: std.mem.Allocator) !u32 {
    223     const page = alloc.rawAlloc(MB, std.mem.Alignment.fromByteUnits(MB), @returnAddress()) orelse return std.mem.Allocator.Error.OutOfMemory;
    224     return @intFromPtr(page);
    225 }
    226 pub fn request_64kb(alloc: std.mem.Allocator) !u32 {
    227     const page = alloc.rawAlloc(KB64, std.mem.Alignment.fromByteUnits(KB64), @returnAddress()) orelse return std.mem.Allocator.Error.OutOfMemory;
    228     return @intFromPtr(page);
    229 }
    230 pub fn request_4kb(alloc: std.mem.Allocator) !u32 {
    231     const page = alloc.rawAlloc(KB4, std.mem.Alignment.fromByteUnits(KB4), @returnAddress()) orelse return std.mem.Allocator.Error.OutOfMemory;
    232     return @intFromPtr(page);
    233 }