//! thread.zig (3372B) — kernel thread creation, round-robin preemption, and teardown.
const std = @import("std");

const interrupts = @import("./interrupts.zig");
const switching = @import("./switching.zig");
const PSR = @import("./psr.zig").PSR;
const mem = @import("./mem.zig");

// Next thread id to hand out; tid 0 is reserved for the scheduler thread.
var tid_counter: usize = 1;
// True only while threads are actually running (set by the trampoline,
// cleared when the ready list drains or join() regains control).
var ready: bool = false;
// Ready queue; the thread at the head is the one currently running.
var threads: std.DoublyLinkedList = .{};
// Bookkeeping thread that join() switches away from; the trampoline
// restores it once the last real thread exits.
var scheduler_thread: *Thread = undefined;
// Allocator for all Thread bookkeeping, supplied via init().
var allocator: std.mem.Allocator = undefined;

/// A kernel thread. Threads are intrusive: the list links live inside the
/// struct (`node`) and the owning Thread is recovered with
/// @fieldParentPtr. The running thread is always at the head of `threads`.
pub const Thread = struct {
    tid: usize,
    node: std.DoublyLinkedList.Node,

    // Full register snapshot, saved on preemption and restored on switch.
    registers: interrupts.Registers,
    // Per-thread stack; sp starts at the highest address (stacks grow down).
    stack: [1024]usize align(8),
};

/// Records the allocator used for all subsequent thread bookkeeping.
/// Must be called before fork()/join()/destroy().
pub fn init(alloc: std.mem.Allocator) !void {
    allocator = alloc;
}

/// Frees every remaining thread and leaves the ready list empty.
pub fn destroy() void {
    // BUGFIX: the previous code walked the list but destroyed
    // get_current() — always the list *head*, since nodes were never
    // unlinked — freeing the first thread once per iteration (double
    // free) while leaking every other thread. Pop each node and free
    // the Thread that owns it instead.
    while (threads.popFirst()) |node| {
        const thread: *Thread = @alignCast(@fieldParentPtr("node", node));
        allocator.destroy(thread);
    }
}

/// Creates a new thread that will run `f(arg)` once scheduling starts.
/// The thread begins life in thread_trampoline, which receives `f` in r0
/// and `arg` in r1 per the AAPCS calling convention.
pub fn fork(f: *const fn (?*anyopaque) callconv(.c) void, arg: ?*anyopaque) !void {
    const thread = try allocator.create(Thread);

    thread.tid = tid_counter;
    tid_counter += 1;

    // BUGFIX: `stack.len` counts usize *elements*, not bytes; the old
    // `+ thread.stack.len` placed sp only len bytes above the base,
    // silently shrinking the usable stack to a fraction of its size.
    // Use the byte size so sp starts at the true top of the array.
    const stack_top = @intFromPtr(&thread.stack) + @sizeOf(@TypeOf(thread.stack));
    var registers: interrupts.Registers = .{
        .gp = .{0} ** 13,
        .sp = stack_top,
        // trampoline will handle exit (hopefully)
        .lr = 0,
        .pc = @intFromPtr(&thread_trampoline),
        .psr = PSR.get_c(),
    };

    // First two argument registers carry the entry point and its argument.
    registers.gp[0] = @intFromPtr(f);
    if (arg) |a| {
        registers.gp[1] = @intFromPtr(a);
    }

    thread.registers = registers;

    threads.append(&thread.node);
}

/// Returns the running thread: by construction, the head of the list.
fn get_current() *Thread {
    return @alignCast(@fieldParentPtr("node", threads.first.?));
}

/// Runs all forked threads to completion, then returns. Allocates a
/// scheduler bookkeeping thread whose registers receive the caller's
/// state; the trampoline restores it when the last thread exits.
pub fn join() !void {
    if (threads.first == null) {
        // Nothing to do
        return;
    }

    const thread = try allocator.create(Thread);
    thread.tid = 0;

    scheduler_thread = thread;

    @import("devices/mini-uart.zig").print("switching to PC=0x{X}\n", .{get_current().registers.pc});

    // Save our state into `thread.registers`, resume the first thread.
    // Control returns here when thread_trampoline restores the
    // scheduler after the last thread has exited.
    switching.switch_state(&thread.registers, &get_current().registers);

    ready = false;

    // BUGFIX: the bookkeeping thread was previously leaked. Its saved
    // registers have already been restored, so it is safe to free now.
    allocator.destroy(thread);
}

// Comes in from exception
/// Round-robin preemption hook: saves the interrupted thread's registers,
/// rotates the ready queue, and restores the next thread's state.
pub fn preempt(regs: *const interrupts.Registers) void {
    // Haven't joined yet
    if (!ready) return;

    // BUGFIX: emptiness must be checked before the single-task check —
    // on an empty list `first == last` holds too (both null), so the
    // old order returned early and the `ready = false` reset below was
    // unreachable.
    if (threads.first == null) {
        ready = false;

        return;
    }

    // Single task: nothing to rotate to.
    if (threads.first == threads.last) {
        return;
    }

    // Move the running thread to the back of the queue.
    const node_task = threads.popFirst().?;
    threads.append(node_task);

    // Snapshot the interrupted thread's registers from the exception frame.
    const current: *Thread = @alignCast(@fieldParentPtr("node", node_task));
    current.registers = regs.*;

    // Switch to new thread
    switching.restore_state(&get_current().registers);
}

/// Returns the id of the running thread. Must only be called while a
/// thread is actually running (get_current asserts a non-empty list).
pub fn get_tid() usize {
    return get_current().tid;
}

/// True while the scheduler has live threads (set by the trampoline).
pub fn is_ready() bool {
    return ready;
}

/// Entry shim every forked thread starts in: runs the user function,
/// then reclaims the thread and hands the CPU to the next thread — or
/// back to the scheduler when this was the last one. Never returns.
fn thread_trampoline(func: *const fn (?*anyopaque) callconv(.c) void, arg: ?*anyopaque) callconv(.c) noreturn {
    // Won't release as next's CPSR will already reenable
    // _ = mem.enter_critical_section();
    @import("devices/mini-uart.zig").print("in trampoline, running PC=0x{X}\n", .{@intFromPtr(func)});

    ready = true;

    func(arg);

    // The running thread is the list head; unlink and free it.
    const current: *Thread = @alignCast(@fieldParentPtr("node", threads.popFirst().?));
    allocator.destroy(current);

    if (threads.first) |node_task| {
        const next: *Thread = @alignCast(@fieldParentPtr("node", node_task));
        switching.restore_state(&next.registers);
    } else {
        // Last thread gone: resume join() in the scheduler thread.
        switching.restore_state(&scheduler_thread.registers);
    }
}