const arch = @import("../root.zig");
const common = @import("common");
const std = @import("std");

const Cr3 = arch.registers.ControlRegisters.Cr3;
const Cr4 = arch.registers.ControlRegisters.Cr4;
const idt = arch.interrupts.idt;
const Perms = common.mm.paging.Perms;

/// An x86-64 page table: 512 8-byte entries, exactly one 4 KiB page.
pub const PageTable = extern struct {
    entries: [512]Entry,

    /// A single long-mode page-table entry (same layout at every level).
    pub const Entry = packed struct(u64) {
        present: bool,
        writable: bool,
        user_accessible: bool,
        write_through: bool,
        disable_cache: bool,
        accessed: bool,
        dirty: bool,
        /// PS bit: marks a huge/large mapping at levels >= 1.
        huge: bool,
        global: bool,
        idk: u3,
        /// Physical frame number: bits 12..51 of the physical address.
        phys_addr: u40,
        idk2: u11,
        nx: bool,

        const Self = @This();

        /// Returns the full physical address stored in this entry.
        ///
        /// The frame number must be widened to u64 *before* shifting:
        /// shifting the u40 field directly would drop the top 12 bits
        /// (or trip the safe-mode overflow check) for addresses >= 1 TiB.
        pub fn getAddr(self: *const Self) u64 {
            return @as(u64, self.phys_addr) << 12;
        }

        /// Stores a page-aligned physical address into this entry.
        /// Bits above 51 are discarded by the truncation.
        pub fn setAddr(self: *Self, phys_addr: u64) void {
            const addr = phys_addr >> 12;
            self.phys_addr = @truncate(addr);
        }
    };
};

/// Cacheability classes a mapping can request.
pub const MemoryType = enum {
    DeviceUncacheable,
    DeviceWriteCombining,
    MemoryWritethrough,
    MemoryWriteBack,
};

/// Returns true when 5-level paging is active (CR4.LA57, bit 12).
pub fn detect_5level() bool {
    const la57: u64 = 1 << 12;
    return Cr4.read() & la57 != 0;
}

/// An address-space context: a CR3 value plus whether it uses 5-level paging.
pub const Context = struct {
    cr3_val: u64,
    level5: bool,

    const Self = @This();

    /// Makes this context the active address space: enables NX in EFER,
    /// sets CR4.LA57 to match `level5`, then loads CR3 (flushing the TLB).
    pub fn apply(self: *const Self) void {
        // Enable NX (EFER.NXE, bit 11) so the `nx` PTE bit is honored.
        const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
        const efer_val = IA32_EFER.read() | (0b1 << 11);
        IA32_EFER.write(efer_val);

        // Set the level 5 bit accordingly.
        const cr4 = Cr4.read();
        const level5mask: u64 = 1 << 12;
        Cr4.write(if (self.level5) cr4 | level5mask else cr4 & ~level5mask);

        Cr3.write(self.cr3_val);
    }

    /// Captures the currently-loaded address space.
    pub fn get_current() Context {
        return .{
            .cr3_val = Cr3.read(),
            .level5 = detect_5level(),
        };
    }

    /// Creates a fresh user address space that shares the kernel's
    /// higher-half mappings (root entries 256..511).
    pub fn make_user() !Context {
        // Make a new root page table.
        const user_root_paddr = try make_page_table();
        const user_root = common.mm.physToHHDM(*PageTable, user_root_paddr);

        // Copy the entire higher half entries from the kernel root, so
        // kernel mappings stay visible in every user context.
        const higher_half = common.init_data.kernel_paging_ctx.root_table(0).get_children();
        @memcpy(user_root.entries[256..], higher_half[256..]);

        return .{
            .cr3_val = user_root_paddr,
            .level5 = common.init_data.kernel_paging_ctx.level5,
        };
    }

    /// x86-64 supports leaf mappings at levels 0 (4 KiB) and 1 (2 MiB);
    /// 1 GiB pages (level 2) are not attempted here.
    pub fn can_map_at(_: *const Self, level: u3) bool {
        return level < 2;
    }

    /// Returns a handle to the root table. The parameter is unused on
    /// x86-64; it exists because aarch64 has 2 root page tables.
    pub fn root_table(self: *Self, _: u64) TableHandle {
        return .{
            // Mask out the non-address bits of the CR3 value.
            .paddr = self.cr3_val & 0xFFFFFFFF_FFFFF000,
            .level = if (self.level5) 5 else 4,
            .context = self,
            .perms = .{
                .x = true,
                .w = true,
                .u = true,
            },
            .underlying = null,
        };
    }

    /// Classifies a PTE at `level`: empty, a pointer to a lower table,
    /// or a terminal mapping (level 0, or a huge page above it).
    pub fn decode(self: *Self, pte: *PageTable.Entry, level: u3) SomePteHandle {
        if (!pte.present) {
            return .Empty;
        }
        if (!pte.huge and level != 0) {
            return .{ .Table = self.parse_table(pte, level) };
        }
        return .{ .Mapping = self.parse_mapping(pte, level) };
    }

    /// Builds a MappingHandle describing the leaf entry `pte`.
    pub fn parse_mapping(self: *Self, pte: *PageTable.Entry, level: u3) MappingHandle {
        const memory_type = self.decode_memory_type(pte, level);
        return .{
            .context = self,
            .paddr = pte.getAddr(),
            .level = level,
            .memory_type = memory_type,
            .underlying = pte,
            .perms = .{
                .w = pte.writable,
                .x = !pte.nx,
                .u = pte.user_accessible,
            },
        };
    }

    /// Derives the memory type from the PCD/PWT bits.
    /// NOTE(review): the PAT bit is ignored, so DeviceWriteCombining is
    /// never decoded — confirm whether PAT-based types are needed.
    pub fn decode_memory_type(_: *Self, pte: *PageTable.Entry, _: u3) ?MemoryType {
        return switch (pte.disable_cache) {
            true => .DeviceUncacheable,
            false => switch (pte.write_through) {
                true => .MemoryWritethrough,
                false => .MemoryWriteBack,
            },
        };
    }

    /// Writes the cache-control bits for the handle's memory type into
    /// `pte`. WriteBack is the hardware default, so it sets nothing;
    /// unsupported types (write-combining) panic.
    pub fn encode_memory_type(_: *Self, pte: *PageTable.Entry, mapping_handle: MappingHandle) void {
        switch (mapping_handle.memory_type.?) {
            .MemoryWritethrough => pte.write_through = true,
            .DeviceUncacheable => pte.disable_cache = true,
            .MemoryWriteBack => {},
            else => @panic("bad memory type"),
        }
    }

    /// Builds a TableHandle describing the non-leaf entry `pte`.
    pub fn parse_table(self: *Self, pte: *PageTable.Entry, level: u3) TableHandle {
        return .{
            .context = self,
            .paddr = pte.getAddr(),
            .level = level,
            .underlying = pte,
            .perms = .{
                .w = pte.writable,
                .x = !pte.nx,
                .u = pte.user_accessible,
            },
        };
    }

    /// Encodes a MappingHandle into a raw, present PTE value.
    pub fn encode_mapping(self: *Self, mapping_handle: MappingHandle) PageTable.Entry {
        var pte = std.mem.zeroes(PageTable.Entry);
        pte.setAddr(mapping_handle.paddr);
        pte.present = true;
        // Leaf entries above level 0 require the PS bit.
        if (mapping_handle.level != 0) {
            pte.huge = true;
        }
        pte.writable = mapping_handle.perms.w;
        pte.user_accessible = mapping_handle.perms.u;
        pte.nx = !mapping_handle.perms.x;
        self.encode_memory_type(&pte, mapping_handle);
        return pte;
    }

    /// Encodes a TableHandle into a raw, present, non-huge PTE value.
    pub fn encode_table(_: *Self, table_handle: TableHandle) PageTable.Entry {
        var pte = std.mem.zeroes(PageTable.Entry);
        pte.writable = table_handle.perms.w;
        pte.user_accessible = table_handle.perms.u;
        pte.nx = !table_handle.perms.x;
        pte.setAddr(table_handle.paddr);
        pte.present = true;
        pte.huge = false;
        return pte;
    }

    /// Drops the TLB entry covering `vaddr` on this CPU.
    pub fn invalidate(_: *const Self, vaddr: u64) void {
        asm volatile (
            \\ invlpg (%[vaddr])
            :
            : [vaddr] "r" (vaddr),
            : .{ .memory = true });
    }

    /// Returns the aligned virtual range that a `level`-sized page
    /// containing `vaddr` would cover.
    pub fn domain(_: *const Self, level: u3, vaddr: u64) StupidSlice {
        return .{
            .ptr = vaddr & ~(page_sizes[level] - 1),
            .len = page_sizes[level],
        };
    }

    /// Software page-table walk: translates `vaddr` to a physical
    /// address, or returns null if it is unmapped. Stops early at huge
    /// mappings and applies the correct page-size offset.
    ///
    /// NOTE(review): assumes a 4-level walk; 5-level (LA57) roots are
    /// not handled here — TODO extend when level5 contexts are used.
    pub fn virt_to_phys(context: *Context, vaddr: usize) ?usize {
        var table = context.root_table(0).get_children();
        var level: u3 = 3;
        while (true) : (level -= 1) {
            const idx = (vaddr >> (12 + @as(u6, level) * 9)) & 0x1FF;
            const pte = &table[idx];
            if (!pte.present) {
                return null;
            }
            // Terminal entry: the final 4 KiB entry at level 0, or a
            // huge mapping at a higher level.
            if (level == 0 or pte.huge) {
                return pte.getAddr() + (vaddr & (page_sizes[level] - 1));
            }
            // Descend into the next table via the HHDM window.
            table = common.mm.physToHHDM(*PageTable, pte.getAddr()).entries[0..];
        }
    }
};

/// Extracts the 9-bit table index for `vaddr` at the given level
/// (level 0 = bits 12..20, level 1 = bits 21..29, ...).
fn idx_from_level(vaddr: u64, level: u6) u9 {
    const shamt = 12 + level * 9;
    return @truncate(vaddr >> shamt);
}

/// Allocates one zeroed physical page for use as a page table and
/// returns its physical address. Caller owns the frame.
pub fn make_page_table() !usize {
    const page_size = std.heap.pageSize();
    const paddr = try common.init_data.bootmem.allocPhys(page_size);
    const pt_ptr = common.mm.physToHHDM([*]u8, paddr);
    @memset(pt_ptr[0..page_size], 0);
    return paddr;
}

/// Bytes covered by one entry at each level: 4 KiB, 2 MiB, 1 GiB, ...
pub const page_sizes = [_]usize{
    0x1000,
    0x200000,
    0x40000000,
    0x8000000000,
    0x1000000000000,
};

/// A decoded leaf PTE plus the context it belongs to.
const MappingHandle = struct {
    paddr: u64,
    level: u3,
    memory_type: ?MemoryType,
    context: *Context,
    perms: Perms,
    underlying: *PageTable.Entry,
};

/// A decoded non-leaf PTE (or the root, where `underlying` is null).
pub const TableHandle = struct {
    paddr: u64,
    level: u3,
    context: *Context,
    perms: Perms,
    underlying: ?*PageTable.Entry,

    const Self = @This();

    /// Returns the 512 entries of this table via the HHDM window.
    pub fn get_children(self: *const Self) []PageTable.Entry {
        const pt = common.mm.physToHHDM(*PageTable, self.paddr);
        return pt.entries[0..];
    }

    /// Returns the entries from `vaddr`'s slot onward.
    pub fn skip_to(self: *const Self, vaddr: u64) []PageTable.Entry {
        return self.get_children()[idx_from_level(vaddr, self.level - 1)..];
    }

    /// Decodes a child entry (which lives one level below this table).
    pub fn decode_child(self: *const Self, pte: *PageTable.Entry) SomePteHandle {
        return self.context.decode(pte, self.level - 1);
    }

    /// Widens the permissions on the entry pointing at this table.
    /// Only ever adds permissions; table-level bits constrain the whole
    /// subtree, so removing them here could break sibling mappings.
    /// Must not be called on the root handle (`underlying` is null).
    pub fn addPerms(self: *const Self, perms: Perms) void {
        if (perms.x) {
            self.underlying.?.nx = false;
        }
        if (perms.w) {
            self.underlying.?.writable = true;
        }
        if (perms.u) {
            self.underlying.?.user_accessible = true;
        }
    }

    /// Allocates a new lower-level table, points `pte` at it, and
    /// returns its handle.
    pub fn make_child_table(self: *const Self, pte: *PageTable.Entry, perms: Perms) !TableHandle {
        const pmem = try make_page_table();
        const result: TableHandle = .{
            .paddr = pmem,
            .context = self.context,
            .level = self.level - 1,
            .perms = perms,
            .underlying = pte,
        };
        pte.* = self.context.encode_table(result);
        return result;
    }

    /// Installs a leaf mapping in `pte` one level below this table.
    /// When `paddr` is null, backing physical memory is allocated from
    /// bootmem at the child level's page size.
    pub fn make_child_mapping(self: *const Self, pte: *PageTable.Entry, paddr: ?u64, perms: Perms, memory_type: MemoryType) !MappingHandle {
        const page_size = page_sizes[self.level - 1];
        const pmem = paddr orelse try common.init_data.bootmem.allocPhys(page_size);
        const result: MappingHandle = .{
            .level = self.level - 1,
            .memory_type = memory_type,
            .context = self.context,
            .perms = perms,
            .underlying = pte,
            .paddr = pmem,
        };
        pte.* = self.context.encode_mapping(result);
        return result;
    }

    /// Returns the virtual range a child of this table covers at `vaddr`.
    pub fn child_domain(self: *const Self, vaddr: u64) StupidSlice {
        return self.context.domain(self.level - 1, vaddr);
    }
};

/// Result of decoding a PTE: a leaf mapping, a lower table, or empty.
pub const SomePteHandle = union(common.mm.paging.PTEType) {
    Mapping: MappingHandle,
    Table: TableHandle,
    Empty,
};

/// A (ptr, len) pair kept as raw integers, since the memory it names
/// may not be mapped in the current address space.
pub const StupidSlice = struct {
    len: usize,
    ptr: usize,
};

/// Page-fault ISR: logs the faulting RIP, register state, CR2 and the
/// error code, then halts. No recovery is attempted.
pub fn page_fault_handler(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
    std.log.err("Page Fault @ 0x{x}, dying...", .{stack_frame.rip});
    arch.interrupts.print_regs(stack_frame.normalize());
    std.log.err("Error CR2: 0x{x:0>16}, Error Code: 0x{x:0>16}", .{ arch.registers.ControlRegisters.Cr2.read(), stack_frame.error_code });
    arch.instructions.die();
}