sylveos

Toy Operating System
Log | Files | Refs

pages.zig (5584B)


      1 const std = @import("std");
      2 const pi = @import("pi");
      3 
      4 const memory = @import("./memory.zig");
      5 
      6 const mmu = pi.mmu;
      7 const pt = pi.pt;
      8 
// A second-level descriptor that maps nothing (all-fault entry).
// NOTE(review): not referenced anywhere in this file — confirm it is used
// by a sibling file before removing.
const SECOND_LEVEL_FAULT: mmu.SecondLevelDescriptor = .{ .ty = .Fault, .descriptor = .{ .fault = .{} } };
// Backing storage for one coarse (second-level) page table, plus the
// intrusive list node used to track the allocation for later teardown.
// The 256-entry descriptor array is 1 KiB aligned, matching the 10-bit
// shift applied to its address in `get_descriptor`.
const CoarseTable = struct {
    node: std.SinglyLinkedList.Node,
    descriptor: [256]mmu.SecondLevelDescriptor align(1 << 10),
};
     14 
// Allocator that owns `sectors` and every CoarseTable on `pages`.
alloc: std.mem.Allocator,
// The first-level table this address space was forked from; restored by
// `switch_out_of`.
root: []mmu.FirstLevelDescriptor,
// This address space's own first-level table (one descriptor per 1 MiB).
sectors: []mmu.FirstLevelDescriptor,
// TODO: this should be a linked list as they don't need to be contiguous
// NOTE(review): `pages` already is a linked list — this TODO presumably
// refers to `sectors` above; confirm intent.
pages: std.SinglyLinkedList,
// Address-space identifier used when programming the context register.
asid: u8,

// Context saved by `switch_into` and restored by `switch_out_of`.
old_context: mmu.ContextId = .{},
     23 
/// Errors produced by the mapping routines, merged with allocator failures
/// (coarse tables are heap-allocated on demand).
pub const Error = error{
    InvalidSection,
    InvalidLargePage,
    InvalidSmallPage,
    // Refusing to overwrite an existing coarse page table with a section.
    TossingCoarsePage,
    // Refusing to overwrite an existing section with a coarse page table.
    TossingSection,
    Map64To4,
    Map4To64,
    // A virtual or physical address was not aligned to the mapping size.
    Unaligned,
} || std.mem.Allocator.Error;
     34 
// XKCD 927 ("Standards"): yet another unified attribute struct on top of
// the hardware's several competing encodings.
/// Software-side description of a mapping: memory-type encoding, access
/// permissions, globality, execute permission, shareability and domain.
/// Translated into hardware descriptor fields by the `map_*` routines.
pub const Attributes = struct {
    encoding: mmu.LockdownAttributesRegister.PageTableEncoding = .{ .preset = .StronglyOrdered },
    access_permission: mmu.AccessPermissions = .UserReadWrite,
    access_permission_x: mmu.AccessPermissionsExtended = .SupervisorRW,
    scope: mmu.LockdownVA.Scope = .Global,
    never_execute: bool = false,
    shared: bool = false,
    domain: u4 = 1,
};
     45 
     46 const Self = @This();
     47 
     48 pub fn fork(alloc: std.mem.Allocator, root: []mmu.FirstLevelDescriptor, asid: u8) !Self {
     49     return .{
     50         .sectors = try pt.dupe(alloc, root),
     51         .pages = .{},
     52         .alloc = alloc,
     53         .root = root,
     54         .asid = asid,
     55     };
     56 }
     57 
     58 fn va_to_index(va: u32) usize {
     59     return @intCast(va >> 20);
     60 }
     61 
     62 fn get_descriptor(self: *Self, current: *mmu.FirstLevelDescriptor, domain: u4) !*[256]mmu.SecondLevelDescriptor {
     63     if (current.ty == .Section) return Error.TossingSection;
     64     if (current.ty == .Fault) {
     65         var coarse_table = try self.alloc.create(CoarseTable);
     66         self.pages.prepend(&coarse_table.node);
     67         @memset(std.mem.asBytes(&coarse_table.descriptor), 0);
     68 
     69         current.ty = .CoarsePageTable;
     70         current.descriptor = .{ .coarse = .{
     71             .domain = domain,
     72             .coarse_base_address = @truncate(@intFromPtr(&coarse_table.*.descriptor) >> 10),
     73         } };
     74     }
     75 
     76     return @ptrFromInt(@as(u32, current.descriptor.coarse.coarse_base_address) << 10);
     77 }
     78 
     79 pub fn map_1mb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
     80     if (!std.mem.isAligned(pa, memory.MB) or !std.mem.isAligned(va, memory.MB)) return Error.Unaligned;
     81 
     82     // Whole section, yippee
     83     // Just check we aren't chucking a coarse page
     84     const idx = va_to_index(va);
     85     var current = &self.sectors[idx];
     86 
     87     if (current.ty == .CoarsePageTable) return Error.TossingCoarsePage;
     88 
     89     const mem_attr = attr.encoding.manual;
     90 
     91     current.ty = .Section;
     92     current.descriptor = .{ .section = .{
     93         .not_global = if (attr.scope == .Global) false else true,
     94         .ap = attr.access_permission,
     95         .apx = attr.access_permission_x,
     96         .b = mem_attr.b,
     97         .c = mem_attr.c,
     98         .tex = mem_attr.tex,
     99         .domain = attr.domain,
    100         .never_execute = attr.never_execute,
    101         .shared = attr.shared,
    102         .section_base_address = @truncate(pa >> 20),
    103     } };
    104     // TODO; only do 1 line
    105     mmu.sync_pte();
    106 }
    107 // Assume user isn't stupid and tries to map 4kb then 64kb
    108 pub fn map_64kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
    109     if (!std.mem.isAligned(pa, memory.KB64) or !std.mem.isAligned(va, memory.KB64)) return Error.Unaligned;
    110 
    111     const idx = va_to_index(va);
    112     const descriptor = try self.get_descriptor(&self.sectors[idx], attr.domain);
    113 
    114     const base_entry = ((va >> 12) & 0xFF) & ~@as(usize, 0xF);
    115     const mem_attr = attr.encoding.manual;
    116 
    117     for (0..16) |i| {
    118         descriptor[base_entry + i].ty = .LargePage;
    119         descriptor[base_entry + i].descriptor.large_page = .{
    120             .not_global = if (attr.scope == .Global) false else true,
    121             .ap = attr.access_permission,
    122             .apx = attr.access_permission_x,
    123             .b = mem_attr.b,
    124             .c = mem_attr.c,
    125             .tex = mem_attr.tex,
    126             .never_execute = attr.never_execute,
    127             .shared = attr.shared,
    128             .base_address = @truncate(pa >> 16),
    129         };
    130     }
    131 
    132     // TODO; only do 1 line
    133     mmu.sync_pte();
    134 }
    135 pub fn map_4kb(self: *Self, va: u32, pa: u32, attr: Attributes) !void {
    136     if (!std.mem.isAligned(pa, memory.KB4) or !std.mem.isAligned(va, memory.KB4)) return Error.Unaligned;
    137 
    138     const idx = va_to_index(va);
    139     const descriptor = try self.get_descriptor(&self.sectors[idx], attr.domain);
    140 
    141     const page_idx = (va >> 12) & 0xFF;
    142     const mem_attr = attr.encoding.manual;
    143 
    144     descriptor[page_idx].ty = if (attr.never_execute) .SmallPageNeverExecute else .SmallPageExecutable;
    145     descriptor[page_idx].descriptor.small_page = .{
    146         .not_global = if (attr.scope == .Global) false else true,
    147         .ap = attr.access_permission,
    148         .apx = attr.access_permission_x,
    149         .b = mem_attr.b,
    150         .c = mem_attr.c,
    151         .tex = mem_attr.tex,
    152         .shared = attr.shared,
    153         .base_address = @truncate(pa >> 12),
    154     };
    155     // TODO; only do 1 line
    156     mmu.sync_pte();
    157 }
    158 
/// Activate this address space: save the current context ID into
/// `old_context`, then program the context register and TTBR0 with this
/// space's ASID/PID and first-level table.
/// NOTE(review): the table address is shifted right by 5 to form the
/// `translation_table_base` field — assumes `sectors` satisfies the
/// hardware's TTBR base alignment; confirm against the pi.mmu layout.
pub fn switch_into(self: *Self, pid: u24) void {
    self.old_context = .get();

    mmu.set_context_ttbr0(.{ .asid = self.asid, .pid = pid }, .{
        .translation_table_base = @truncate(@intFromPtr(self.sectors.ptr) >> 5),
    });
}
    166 
/// Deactivate this address space: restore the context ID saved by
/// `switch_into` and point TTBR0 back at the `root` first-level table.
/// NOTE(review): if `switch_into` was never called, `old_context` is the
/// default-initialized `mmu.ContextId` — confirm that is a valid state to
/// restore.
pub fn switch_out_of(self: *const Self) void {
    mmu.set_context_ttbr0(self.old_context, .{
        .translation_table_base = @truncate(@intFromPtr(self.root.ptr) >> 5),
    });
}