//! The Global Descriptor Table (GDT) structure for AMD64
const std = @import("std");
const arch = @import("../root.zig");

/// One 8-byte segment descriptor in the AMD64 layout.
/// In long mode the base/limit fields of code and data descriptors are
/// ignored by the CPU, so they default to zero here.
pub const Descriptor = packed struct(u64) {
    limit_low: u16 = 0,
    base_low: u16 = 0,
    base_mid: u8 = 0,
    access: Access,
    limit_high: u4 = 0,
    flags: Flags = .{},
    base_high: u8 = 0,

    const Self = @This();

    /// The access byte (descriptor bits 40..47).
    pub const Access = packed struct(u8) {
        /// Accessed bit — pre-set so the CPU never has to write the
        /// descriptor back when the segment is first loaded.
        accessed: bool = true,
        /// Readable (for code) / Writable (for data).
        rw: bool = false,
        /// Direction bit (data) or Conforming bit (code).
        dc: bool = false,
        /// Executable: true for code segments, false for data segments.
        executable: bool,
        /// Descriptor type: system (TSS) vs. ordinary code/data.
        descriptor_type: DescriptorType = .code_or_data,
        /// Descriptor Privilege Level (ring 0..3).
        dpl: u2,
        /// Present bit.
        p: bool = true,

        pub const DescriptorType = enum(u1) {
            tss = 0,
            code_or_data = 1,
        };
    };

    /// The flags nibble (descriptor bits 52..55).
    pub const Flags = packed struct(u4) {
        /// Reserved / available-to-software bit.
        _reserved: u1 = 0,
        /// Long-mode code flag (L). Set for 64-bit code segments; the CPU
        /// ignores it for data segments, so a shared default is harmless.
        long_mode: bool = true,
        /// Default operand size (D/B). Must be 0 whenever L is set.
        db: DB = .protected_16,
        /// Granularity of the limit field: bytes or 4 KiB pages.
        granularity: Granularity = .byte,

        pub const Granularity = enum(u1) {
            byte = 0,
            page = 1,
        };

        pub const DB = enum(u1) {
            protected_16 = 0,
            protected_32 = 1,
        };
    };

    /// The mandatory all-zero first GDT entry.
    pub const null_desc = std.mem.zeroes(Descriptor);
    /// Ring-0 64-bit code segment.
    pub const kernel_code: Self = .{ .access = .{ .dpl = 0, .executable = true } };
    /// Ring-0 writable data segment.
    pub const kernel_data: Self = .{ .access = .{ .dpl = 0, .executable = false, .rw = true } };
    /// Ring-3 64-bit code segment.
    pub const user_code: Self = .{ .access = .{ .dpl = 3, .executable = true } };
    /// Ring-3 writable data segment.
    pub const user_data: Self = .{ .access = .{ .dpl = 3, .executable = false, .rw = true } };
};

/// The fixed GDT layout used by the kernel.
/// NOTE(review): `user_data` precedes `user_code` — presumably to match the
/// selector ordering SYSRET derives from the STAR MSR; confirm against the
/// MSR setup code.
pub const StandardGdt = extern struct {
    null_desc: Descriptor = .null_desc,
    kernel_code: Descriptor = .kernel_code,
    kernel_data: Descriptor = .kernel_data,
    // The 64-bit TSS descriptor occupies two consecutive GDT slots.
    tss_desc: TssDescriptor align(@alignOf(Descriptor)) = .{},
    user_data: Descriptor = .user_data,
    user_code: Descriptor = .user_code,

    /// Segment selector values derived from the table layout.
    /// User selectors carry RPL = 3 in their low two bits.
    pub const selectors = struct {
        pub const null_desc = @offsetOf(StandardGdt, "null_desc");
        pub const kernel_code = @offsetOf(StandardGdt, "kernel_code");
        pub const kernel_data = @offsetOf(StandardGdt, "kernel_data");
        pub const tss_desc = @offsetOf(StandardGdt, "tss_desc");
        pub const user_data = @offsetOf(StandardGdt, "user_data") | 0b11;
        pub const user_code = @offsetOf(StandardGdt, "user_code") | 0b11;
    };

    const Self = @This();

    /// Installs this GDT and reloads every segment register.
    /// `self` must outlive the time the GDT is active (the CPU keeps
    /// reading it via the GDTR base).
    pub fn load(self: *Self) void {
        // 1. Load the GDTR. The limit is inclusive, hence the - 1.
        const gdtr: Gdtr = .{
            .limit = @truncate(@sizeOf(StandardGdt) - 1),
            .base = @intFromPtr(self),
        };
        gdtr.load();

        // 2. Point all data segment registers at the kernel data segment.
        asm volatile (
            \\ mov %[sel], %%ds
            \\ mov %[sel], %%es
            \\ mov %[sel], %%fs
            \\ mov %[sel], %%gs
            \\ mov %[sel], %%ss
            :
            : [sel] "rm" (@as(u16, selectors.kernel_data)),
        );

        // 3. Reload CS. CS cannot be written with `mov`; push the new
        //    selector and a return address, then execute a 64-bit far
        //    return (`lretq`, REX.W + 0xCB) which pops RIP and CS.
        asm volatile (
            \\ push %[sel]
            \\ lea 1f(%%rip), %%rax
            \\ push %%rax
            \\ lretq
            \\ 1:
            :
            : [sel] "i" (@as(u16, selectors.kernel_code)),
            : .{ .rax = true });

        // 4. Load the task register with the TSS selector.
        asm volatile (
            \\ ltr %[sel]
            :
            : [sel] "r" (@as(u16, selectors.tss_desc)),
        );
    }
};

/// The 10-byte pseudo-descriptor consumed by the LGDT instruction.
pub const Gdtr = packed struct(u80) {
    limit: u16,
    base: u64,

    /// Executes LGDT with this pseudo-descriptor as its memory operand.
    pub fn load(self: *const Gdtr) void {
        asm volatile ("lgdt %[gdtr_ptr]"
            :
            : [gdtr_ptr] "*p" (self),
        );
    }
};

/// A 16-byte (two-slot) 64-bit TSS descriptor.
const TssDescriptor = extern struct {
    const Low = packed struct(u64) {
        limit_low: u16 = 0,
        base_low: u16 = 0,
        base_mid: u8 = 0,
        // 0b1001 = available 64-bit TSS.
        seg_type: u4 = 0b1001,
        _reserved0: u1 = 0b0,
        dpl: u2 = 0,
        p: bool = true,
        limit_high: u4 = 0,
        unused: u1 = 0,
        _reserved1: u2 = 0b00,
        granularity: u1 = 0,
        base_high: u8 = 0,
    };

    descriptor: Low = .{},
    // Bits 63..32 of the 64-bit TSS base address.
    base_top: u32 = 0,
    _reserved: u32 = 0,

    const Self = @This();

    /// Points this descriptor at `tss`, splitting the 64-bit base across
    /// the four base fields and setting the (inclusive) limit.
    pub fn set_tss_addr(self: *Self, tss: *const arch.structures.tss.Tss) void {
        const tss_ptr: usize = @intFromPtr(tss);

        // Set the base
        self.descriptor.base_low = @truncate(tss_ptr);
        self.descriptor.base_mid = @truncate(tss_ptr >> 16);
        self.descriptor.base_high = @truncate(tss_ptr >> 24);
        self.base_top = @truncate(tss_ptr >> 32);

        // Set the limit. Segment limits are inclusive, so the correct
        // value is the size of the TSS minus one (the original wrote
        // @sizeOf(Tss), over-stating the limit by one byte).
        self.descriptor.limit_low = @sizeOf(arch.structures.tss.Tss) - 1;
    }
};