//! Microkernel OS experiment (Zig ⚡) — amd64 Limine boot and early init.
1const limine = @import("limine");
2const std = @import("std");
3const arch = @import("root.zig");
4const common = @import("common");
5const console = @import("console");
6const log = std.log.scoped(.amd64_init);
7const StandardGdt = arch.structures.gdt.StandardGdt;
8const Tss = arch.structures.tss.Tss;
9
/// Limine boot-protocol request objects.
/// Each request is an exported global placed into a dedicated linker section
/// so the bootloader can discover it by scanning between the start/end
/// markers. The symbols must survive to the final binary; `early_init`
/// references them via `doNotOptimizeAway` for that reason.
pub const limine_requests = struct {
    // Sentinels delimiting the request region the bootloader scans.
    export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{};
    export var end_marker: limine.RequestsEndMarker linksection(".limine_reqs_end") = .{};

    // Minimum protocol base revision we require; checked in `early_init`.
    pub export var base_revision: limine.BaseRevision linksection(".limine_reqs") = .{ .revision = 3 };
    // Framebuffer(s) — first one is recorded into common.init_data (see bsp_init).
    pub export var framebuffer: limine.FramebufferRequest linksection(".limine_reqs") = .{};
    // Higher Half Direct Mapping offset — mandatory, boot dies without it.
    pub export var hhdm: limine.HhdmRequest linksection(".limine_reqs") = .{};
    // Physical memory map — mandatory, boot dies without it.
    pub export var memmap: limine.MemoryMapRequest linksection(".limine_reqs") = .{};
    // ACPI RSDP pointer, if the platform has one (preferred over DTB).
    pub export var rsdp_req: limine.RsdpRequest linksection(".limine_reqs") = .{};
    // Flattened device tree pointer, if the platform provides one.
    pub export var dtb_req: limine.DtbRequest linksection(".limine_reqs") = .{};
    // Bootloader modules; the first module is the root task's ELF image.
    pub export var modules: limine.ModuleRequest linksection(".limine_reqs") = .{};
    // Multiprocessor info; x2APIC requested for AP bring-up.
    pub export var mp: limine.SmpMpFeature.MpRequest linksection(".limine_reqs") = .{ .flags = .{ .x2apic = true } };
};
23
/// First-stage boot: validate the Limine handoff and capture the data every
/// later stage depends on (HHDM slide, CPUID info, kernel paging context).
/// Halts the CPU (`die`) on any unrecoverable condition — there is nothing
/// to report to yet this early.
pub fn early_init() void {
    // Reference every request so the compiler/linker cannot discard them;
    // the bootloader finds them by scanning the .limine_reqs sections.
    inline for (@typeInfo(limine_requests).@"struct".decls) |decl| {
        std.mem.doNotOptimizeAway(&@field(limine_requests, decl.name));
    }

    // If the base revision isn't supported, we can't boot.
    // (FIX: this check was previously duplicated verbatim; one copy suffices.)
    if (!limine_requests.base_revision.isSupported()) {
        @branchHint(.cold);
        arch.instructions.die();
    }

    // Die if we don't have a memory map or Higher Half Direct Mapping —
    // both are prerequisites for all further bring-up.
    if (limine_requests.memmap.response == null) {
        @branchHint(.cold);
        arch.instructions.die();
    }

    if (limine_requests.hhdm.response == null) {
        @branchHint(.cold);
        arch.instructions.die();
    }
    // Unwrap is safe: null-checked directly above.
    const hhdm_offset = limine_requests.hhdm.response.?.offset;
    common.init_data.hhdm_slide = hhdm_offset;

    // Get CPUID info.
    arch.instructions.cpuid.init();

    // Set up the kernel paging context from the currently-loaded tables.
    common.init_data.kernel_paging_ctx = arch.mm.paging.Context.get_current();
}
61
/// Second-stage boot on the bootstrap processor: per-cpu data, BSP GDT/TSS,
/// framebuffer capture, hardware-description discovery, root task attach,
/// and finally AP bring-up. Panics if no root task module was provided.
pub fn bsp_init() void {
    // Set up per-cpu data, sized by the CPU count the bootloader reported.
    arch.per_cpu_init_data.init(limine_requests.mp.response.?.cpu_count);

    // Set up our own GDT and TSS for the BSP (index 0).
    const gdt = &arch.per_cpu_init_data.gdt_buf[0];
    gdt.* = .{};
    const tss = &arch.per_cpu_init_data.tss_buf[0];
    // TODO: create a fixed mapping for the pages maybe?
    // rsp0 points at the BSP kernel stack through the HHDM window.
    tss.* = .{
        .rsp0 = common.init_data.hhdm_slide + arch.per_cpu_init_data.getStackPhys(0),
    };

    gdt.tss_desc.set_tss_addr(tss);
    gdt.load();

    // Add in the framebuffer (first one only), translated into our own
    // init_data description.
    if (limine_requests.framebuffer.response) |fb_response| {
        if (fb_response.framebuffer_count > 0) {
            const fb = fb_response.getFramebuffers()[0];
            common.init_data.framebuffer = .{
                .address = @ptrCast(@alignCast(fb.address)),
                .width = fb.width,
                .height = fb.height,
                .pitch = fb.pitch,
                .red_mask_size = fb.red_mask_size,
                .red_mask_shift = fb.red_mask_shift,
                .green_mask_size = fb.green_mask_size,
                .green_mask_shift = fb.green_mask_shift,
                .blue_mask_size = fb.blue_mask_size,
                .blue_mask_shift = fb.blue_mask_shift,
                .bypp = fb.bpp / 8,
            };
        }
    }

    // Add in ACPI/dtb if found, prefer ACPI.
    initHwDesc();

    // Attach the root task: the first bootloader module is its ELF image.
    // FIX: a present-but-empty module list previously slipped through
    // without a root task; treat it the same as a missing response.
    const module_response = limine_requests.modules.response orelse {
        @branchHint(.unlikely);
        @panic("No root task found!");
    };
    if (module_response.module_count == 0) {
        @branchHint(.unlikely);
        @panic("No root task found!");
    }
    const mod = module_response.modules.?[0];
    const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
    const mod_size = mod.size;
    log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
    common.init_data.root_task_elf = mod_addr[0..mod_size];

    bootstrapAPs();
}
117
/// Load every bootloader module into a Task slot carved out of one page of
/// boot memory. Allocation failure is fatal this early.
pub fn loadTasks() void {
    const page_size = std.heap.pageSize();
    const tasks_buf: [*]arch.structures.Task = @ptrFromInt(common.init_data.bootmem.allocMem(page_size) catch {
        // Use the file's scoped logger (was std.log) for consistency.
        log.err("Couldn't allocate tasks!", .{});
        @panic("allocPhys");
    });
    // How many Task slots fit in the single scratch page.
    const tasks_scratch: []arch.structures.Task = tasks_buf[0 .. page_size / @sizeOf(arch.structures.Task)];

    if (limine_requests.modules.response) |module_response| {
        // FIX: clamp to scratch capacity — more modules than fit in one page
        // previously wrote past the end of tasks_scratch.
        const count = @min(module_response.module_count, tasks_scratch.len);
        for (module_response.modules.?[0..count], 0..) |mod, i| {
            const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
            const mod_size = mod.size;
            common.loadTask(&tasks_scratch[i], mod_addr[0..mod_size]);
        }
    }
}
135
/// Record the platform's hardware description in init_data.
/// ACPI (RSDP) takes precedence over a device tree when both are present;
/// if neither responded, init_data is left untouched.
fn initHwDesc() void {
    if (limine_requests.rsdp_req.response) |rsdp_response| {
        common.init_data.hardware_description = .{ .acpi_rsdp = rsdp_response.address };
    } else if (limine_requests.dtb_req.response) |dtb_response| {
        common.init_data.hardware_description = .{ .dtb = dtb_response.dtb_ptr };
    }
}
144
/// Release the application processors by pointing each CPU entry's
/// goto_address at `ap_init`. (Per the Limine protocol, the BSP's own entry
/// ignores this write.)
fn bootstrapAPs() void {
    log.info("Bootstrapping APs...", .{});
    const mp_response = limine_requests.mp.response.?;
    for (mp_response.getCpus()) |cpu_info| {
        cpu_info.goto_address = ap_init;
    }
}
152
/// Entry point each application processor jumps to from the bootloader's
/// parking loop. Installs the IDT and this core's GDT/TSS, then halts.
fn ap_init(mp_info: *limine.SmpMpFeature.MpInfo) callconv(.c) noreturn {
    const cpu_id = mp_info.processor_id;

    // Install the interrupt descriptor table on this core.
    arch.interrupts.idt.load();

    // Build this core's GDT and TSS in its per-cpu slots.
    const my_gdt = &arch.per_cpu_init_data.gdt_buf[cpu_id];
    const my_tss = &arch.per_cpu_init_data.tss_buf[cpu_id];
    my_gdt.* = .{};
    // NOTE(review): unlike the BSP path, rsp0 is left at its default here —
    // presumably fine while APs only halt; confirm before scheduling on APs.
    my_tss.* = .{};

    my_gdt.tss_desc.set_tss_addr(my_tss);
    my_gdt.load();

    log.info("CPU {}: setup GDT and TSS, killing myself rn...", .{cpu_id});

    arch.instructions.die();
}
169}