const limine = @import("limine"); const std = @import("std"); const arch = @import("root.zig"); const common = @import("common"); const console = @import("console"); const log = std.log.scoped(.amd64_init); const StandardGdt = arch.structures.gdt.StandardGdt; const Tss = arch.structures.tss.Tss; pub const limine_requests = struct { export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{}; export var end_marker: limine.RequestsEndMarker linksection(".limine_reqs_end") = .{}; pub export var base_revision: limine.BaseRevision linksection(".limine_reqs") = .{ .revision = 3 }; pub export var framebuffer: limine.FramebufferRequest linksection(".limine_reqs") = .{}; pub export var hhdm: limine.HhdmRequest linksection(".limine_reqs") = .{}; pub export var memmap: limine.MemoryMapRequest linksection(".limine_reqs") = .{}; pub export var rsdp_req: limine.RsdpRequest linksection(".limine_reqs") = .{}; pub export var dtb_req: limine.DtbRequest linksection(".limine_reqs") = .{}; pub export var modules: limine.ModuleRequest linksection(".limine_reqs") = .{}; pub export var mp: limine.SmpMpFeature.MpRequest linksection(".limine_reqs") = .{ .flags = .{ .x2apic = true } }; }; pub fn early_init() void { // Don't optimize away the limine requests inline for (@typeInfo(limine_requests).@"struct".decls) |decl| { std.mem.doNotOptimizeAway(&@field(limine_requests, decl.name)); } // If the base revision isn't supported, we can't boot if (!limine_requests.base_revision.isSupported()) { @branchHint(.cold); arch.instructions.die(); } // If the base revision isn't supported, we can't boot if (!limine_requests.base_revision.isSupported()) { @branchHint(.cold); arch.instructions.die(); } // Die if we don't have a memory map or Higher Half Direct Mapping if (limine_requests.memmap.response == null) { @branchHint(.cold); arch.instructions.die(); } if (limine_requests.hhdm.response == null) { @branchHint(.cold); arch.instructions.die(); } const hhdm_offset = limine_requests.hhdm.response.?.offset; common.init_data.hhdm_slide = hhdm_offset; // Get CPUID info arch.instructions.cpuid.init(); // Set up the kernel paging context common.init_data.kernel_paging_ctx = arch.mm.paging.Context.get_current(); } pub fn bsp_init() void { // Set up per-cpu data arch.per_cpu_init_data.init(limine_requests.mp.response.?.cpu_count); // Set up our own GDT and TSS const gdt = &arch.per_cpu_init_data.gdt_buf[0]; gdt.* = .{}; const tss = &arch.per_cpu_init_data.tss_buf[0]; // TODO: create a fixed mapping for the pages maybe? 
tss.* = .{ .rsp0 = common.init_data.hhdm_slide + arch.per_cpu_init_data.getStackPhys(0), }; gdt.tss_desc.set_tss_addr(tss); gdt.load(); // Add in the framebuffer if (limine_requests.framebuffer.response) |fb_response| { if (fb_response.framebuffer_count > 0) { const fb = fb_response.getFramebuffers()[0]; common.init_data.framebuffer = .{ .address = @ptrCast(@alignCast(fb.address)), .width = fb.width, .height = fb.height, .pitch = fb.pitch, .red_mask_size = fb.red_mask_size, .red_mask_shift = fb.red_mask_shift, .green_mask_size = fb.green_mask_size, .green_mask_shift = fb.green_mask_shift, .blue_mask_size = fb.blue_mask_size, .blue_mask_shift = fb.blue_mask_shift, .bypp = fb.bpp / 8, }; } } // Add in ACPI/dtb if found, prefer ACPI initHwDesc(); // Attach the root task if (limine_requests.modules.response) |module_response| { if (module_response.module_count > 0) { const mod = module_response.modules.?[0]; const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address); const mod_size = mod.size; log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address }); common.init_data.root_task_elf = mod_addr[0..mod_size]; } } else { @branchHint(.unlikely); @panic("No root task found!"); } bootstrapAPs(); } pub fn loadTasks() void { const tasks_buf: [*]arch.structures.Task = @ptrFromInt(common.init_data.bootmem.allocMem(std.heap.pageSize()) catch { std.log.err("Couldn't allocate tasks!", .{}); @panic("allocPhys"); }); const tasks_scratch: []arch.structures.Task = tasks_buf[0 .. std.heap.pageSize() / @sizeOf(arch.structures.Task)]; if (limine_requests.modules.response) |module_response| { if (module_response.module_count > 0) { for (module_response.modules.?[0..module_response.module_count], 0..) |mod, i| { const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address); const mod_size = mod.size; common.loadTask(&tasks_scratch[i], mod_addr[0..mod_size]); } } } } fn initHwDesc() void { if (limine_requests.dtb_req.response) |dtb_response| { common.init_data.hardware_description = .{ .dtb = dtb_response.dtb_ptr }; } if (limine_requests.rsdp_req.response) |rsdp_response| { common.init_data.hardware_description = .{ .acpi_rsdp = rsdp_response.address }; } } fn bootstrapAPs() void { log.info("Bootstrapping APs...", .{}); const cpus = limine_requests.mp.response.?.getCpus(); for (cpus) |cpu| { cpu.goto_address = ap_init; } } fn ap_init(mp_info: *limine.SmpMpFeature.MpInfo) callconv(.c) noreturn { // Set up the IDT arch.interrupts.idt.load(); // Set up our GDT and TSS const gdt = &arch.per_cpu_init_data.gdt_buf[mp_info.processor_id]; gdt.* = .{}; const tss = &arch.per_cpu_init_data.tss_buf[mp_info.processor_id]; tss.* = .{}; gdt.tss_desc.set_tss_addr(tss); gdt.load(); log.info("CPU {}: setup GDT and TSS, killing myself rn...", .{mp_info.processor_id}); arch.instructions.die(); }