Microkernel thing OS experiment (Zig ⚡)
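
//! x86_64 paging: page-table entry encode/decode, 4-/5-level
//! address-space contexts, and software page-table walks.
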
const arch = @import("../root.zig");
const common = @import("common");
const std = @import("std");
const Cr3 = arch.registers.ControlRegisters.Cr3;
const Cr4 = arch.registers.ControlRegisters.Cr4;
const idt = arch.interrupts.idt;
const Perms = common.mm.paging.Perms;

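/// One x86_64 page table: 512 eight-byte entries filling a 4 KiB frame.
/// The same entry layout is used at every level of the hierarchy.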
pub const PageTable = extern struct {
    entries: [512]Entry,

    pub const Entry = packed struct(u64) {
        present: bool,
        writable: bool,
        user_accessible: bool,
        write_through: bool,
        disable_cache: bool,
        accessed: bool,
        dirty: bool,
        huge: bool,
        global: bool,
        idk: u3, // bits 9..11: ignored by hardware, free for software use
        phys_addr: u40, // bits 12..51: physical frame number
        idk2: u11, // bits 52..62: ignored by hardware, free for software use
        nx: bool,

        const Self = @This();

        pub fn getAddr(self: *const Self) u64 {
            // Widen before shifting: shifting the u40 field directly
            // would overflow for frame numbers above 2^28.
            return @as(u64, self.phys_addr) << 12;
        }

        pub fn setAddr(self: *Self, phys_addr: u64) void {
            const addr = phys_addr >> 12;
            self.phys_addr = @truncate(addr);
        }
    };
};

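/// Cache behavior of a mapping, encoded through the PWT/PCD entry bits.
/// Write-combining would need the PAT and is currently rejected on encode.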
pub const MemoryType = enum {
    DeviceUncacheable,
    DeviceWriteCombining,
    MemoryWritethrough,
    MemoryWriteBack,
};

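/// Whether 5-level paging is currently enabled (CR4.LA57, bit 12).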
pub fn detect_5level() bool {
    const bits: u64 = 1 << 12;
    return Cr4.read() & bits != 0;
}

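/// One address space: a raw CR3 value plus whether it uses 5-level paging.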
pub const Context = struct {
    cr3_val: u64,
    level5: bool,

    const Self = @This();

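    /// Switch the CPU to this address space: enable NX, match CR4.LA57,
    /// then load CR3.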
    pub fn apply(self: *const Self) void {
        // Enable NX (no-execute): IA32_EFER.NXE is bit 11.
        const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
        const efer_val = IA32_EFER.read() | (0b1 << 11);
        IA32_EFER.write(efer_val);

        // Set or clear CR4.LA57 (bit 12) to match this context
        const cr4 = Cr4.read();
        const level5mask: u64 = 1 << 12;
        Cr4.write(if (self.level5) cr4 | level5mask else cr4 & ~level5mask);

        Cr3.write(self.cr3_val);
    }

    pub fn get_current() Context {
        return .{
            .cr3_val = Cr3.read(),
            .level5 = detect_5level(),
        };
    }

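    /// Build a new user address space that shares the kernel's higher half.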
    pub fn make_user() !Context {
        // Make a new root page table
        const user_root_paddr = try make_page_table();
        const user_root = common.mm.physToHHDM(*PageTable, user_root_paddr);
        // Copy the higher-half entries (256..512) from the kernel context so
        // kernel mappings are identical in every address space.
        const higher_half = common.init_data.kernel_paging_ctx.root_table(0).get_children();
        @memcpy(user_root.entries[256..], higher_half[256..]);
        return .{
            .cr3_val = user_root_paddr,
            .level5 = common.init_data.kernel_paging_ctx.level5,
        };
    }

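    /// Mappings may only be created at level 0 (4 KiB) and level 1 (2 MiB).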
    pub fn can_map_at(_: *const Self, level: u3) bool {
        return level < 2;
    }

    // The parameter exists because aarch64 has 2 root page tables.
    pub fn root_table(self: *Self, _: u64) TableHandle {
        return .{
            // Mask off the low flag bits of CR3, leaving the root
            // table's physical address.
            .paddr = self.cr3_val & 0xFFFFFFFF_FFFFF000,
            .level = if (self.level5) 5 else 4,
            .context = self,
            .perms = .{
                .x = true,
                .w = true,
                .u = true,
            },
            .underlying = null,
        };
    }

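    /// Classify a PTE: not present, a terminal mapping, or a child table.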
    pub fn decode(self: *Self, pte: *PageTable.Entry, level: u3) SomePteHandle {
        if (!pte.present) {
            return .Empty;
        }
        if (!pte.huge and level != 0) {
            return .{ .Table = self.parse_table(pte, level) };
        }
        return .{ .Mapping = self.parse_mapping(pte, level) };
    }

    pub fn parse_mapping(self: *Self, pte: *PageTable.Entry, level: u3) MappingHandle {
        const memory_type = self.decode_memory_type(pte, level);
        return .{
            .context = self,
            .paddr = pte.getAddr(),
            .level = level,
            .memory_type = memory_type,
            .underlying = pte,
            .perms = .{
                .w = pte.writable,
                .x = !pte.nx,
                .u = pte.user_accessible,
            },
        };
    }

    pub fn decode_memory_type(_: *Self, pte: *PageTable.Entry, _: u3) ?MemoryType {
        // Only the PCD/PWT bits are inspected; PAT-based types such as
        // write-combining are not decoded here.
        return switch (pte.disable_cache) {
            true => .DeviceUncacheable,
            false => switch (pte.write_through) {
                true => .MemoryWritethrough,
                false => .MemoryWriteBack,
            },
        };
    }

    pub fn encode_memory_type(_: *Self, pte: *PageTable.Entry, mapping_handle: MappingHandle) void {
        switch (mapping_handle.memory_type.?) {
            .MemoryWritethrough => pte.write_through = true,
            .DeviceUncacheable => pte.disable_cache = true,
            .MemoryWriteBack => {},
            else => @panic("bad memory type"),
        }
    }

    pub fn parse_table(self: *Self, pte: *PageTable.Entry, level: u3) TableHandle {
        return .{
            .context = self,
            .paddr = pte.getAddr(),
            .level = level,
            .underlying = pte,
            .perms = .{
                .w = pte.writable,
                .x = !pte.nx,
                .u = pte.user_accessible,
            },
        };
    }

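    /// Build a present leaf PTE from a mapping handle; any level above 0
    /// sets the huge bit.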
    pub fn encode_mapping(self: *Self, mapping_handle: MappingHandle) PageTable.Entry {
        var pte = std.mem.zeroes(PageTable.Entry);
        pte.setAddr(mapping_handle.paddr);
        pte.present = true;
        if (mapping_handle.level != 0) {
            pte.huge = true;
        }

        pte.writable = mapping_handle.perms.w;
        pte.user_accessible = mapping_handle.perms.u;
        pte.nx = !mapping_handle.perms.x;

        self.encode_memory_type(&pte, mapping_handle);
        return pte;
    }

    pub fn encode_table(_: *Self, table_handle: TableHandle) PageTable.Entry {
        var pte = std.mem.zeroes(PageTable.Entry);
        pte.writable = table_handle.perms.w;
        pte.user_accessible = table_handle.perms.u;
        pte.nx = !table_handle.perms.x;
        pte.setAddr(table_handle.paddr);

        pte.present = true;
        pte.huge = false;

        return pte;
    }

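    /// Flush the TLB entry for a single virtual address via invlpg.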
    pub fn invalidate(_: *const Self, vaddr: u64) void {
        asm volatile (
            \\ invlpg (%[vaddr])
            :
            : [vaddr] "r" (vaddr),
            : .{ .memory = true });
    }

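    /// The aligned virtual range covered by one page of the given level's
    /// size that contains vaddr.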
    pub fn domain(_: *const Self, level: u3, vaddr: u64) StupidSlice {
        return .{
            .ptr = vaddr & ~(page_sizes[level] - 1),
            .len = page_sizes[level],
        };
    }

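    /// Software page-table walk. Assumes a 4-level walk ending in a 4 KiB
    /// page; huge pages and 5-level roots are not handled here.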
    pub fn virt_to_phys(context: *Context, vaddr: usize) ?usize {
        const root = context.root_table(0).get_children();
        const indexes = [_]usize{
            (vaddr >> 39) & 0x1FF,
            (vaddr >> 30) & 0x1FF,
            (vaddr >> 21) & 0x1FF,
            (vaddr >> 12) & 0x1FF,
        };
        var pte_ptr = &root[indexes[0]];
        std.log.debug("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
        for (0..3) |i| {
            if (!pte_ptr.present) {
                return null;
            }
            const next_page_table = common.mm.physToHHDM(*PageTable, pte_ptr.getAddr());
            pte_ptr = &next_page_table.entries[indexes[i + 1]];
            std.log.debug("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
        }
        // The final level-0 PTE must be present as well.
        if (!pte_ptr.present) {
            return null;
        }
        return pte_ptr.getAddr() + (vaddr & 0xFFF);
    }
};

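/// The 9-bit table index for vaddr at the given level; level 0 indexes
/// bits 12..20, and each level above shifts 9 bits further.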
fn idx_from_level(vaddr: u64, level: u6) u9 {
    const shamt = 12 + level * 9;
    return @truncate(vaddr >> shamt);
}

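/// Allocate one zeroed physical page for use as a page table and return
/// its physical address.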
pub fn make_page_table() !usize {
    const page_size = std.heap.pageSize();
    const paddr = try common.init_data.bootmem.allocPhys(page_size);
    const pt_ptr = common.mm.physToHHDM([*]u8, paddr);
    @memset(pt_ptr[0..page_size], 0);
    return paddr;
}

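/// Bytes mapped by one entry at each level: 4 KiB, 2 MiB, 1 GiB, 512 GiB,
/// 256 TiB.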
pub const page_sizes = [_]usize{
    0x1000,
    0x200000,
    0x40000000,
    0x8000000000,
    0x1000000000000,
};

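/// A decoded leaf mapping: target physical address, size class, memory
/// type, and permissions, plus the PTE it came from.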
const MappingHandle = struct {
    paddr: u64,
    level: u3,
    memory_type: ?MemoryType,
    context: *Context,
    perms: Perms,
    underlying: *PageTable.Entry,
};

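/// A page table plus how it was reached; underlying is the PTE pointing at
/// this table, or null for the root.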
pub const TableHandle = struct {
    paddr: u64,
    level: u3,
    context: *Context,
    perms: Perms,
    underlying: ?*PageTable.Entry,

    const Self = @This();

    /// View this table's 512 entries through the HHDM.
    pub fn get_children(self: *const Self) []PageTable.Entry {
        const pt = common.mm.physToHHDM(*PageTable, self.paddr);
        return pt.entries[0..];
    }

    pub fn skip_to(self: *const Self, vaddr: u64) []PageTable.Entry {
        return self.get_children()[idx_from_level(vaddr, self.level - 1)..];
    }

    pub fn decode_child(self: *const Self, pte: *PageTable.Entry) SomePteHandle {
        return self.context.decode(pte, self.level - 1);
    }

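    /// Widen the permissions on the PTE that points at this table; bits are
    /// only ever relaxed here, never revoked.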
    pub fn addPerms(self: *const Self, perms: Perms) void {
        if (perms.x) {
            self.underlying.?.nx = false;
        }
        if (perms.w) {
            self.underlying.?.writable = true;
        }
        if (perms.u) {
            self.underlying.?.user_accessible = true;
        }
    }

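    /// Allocate a zeroed table one level down and install a PTE for it in
    /// this table.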
    pub fn make_child_table(self: *const Self, pte: *PageTable.Entry, perms: Perms) !TableHandle {
        const pmem = try make_page_table();

        const result: TableHandle = .{
            .paddr = pmem,
            .context = self.context,
            .level = self.level - 1,
            .perms = perms,
            .underlying = pte,
        };
        pte.* = self.context.encode_table(result);

        return result;
    }

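    /// Install a mapping one level down; fresh physical memory is allocated
    /// when no paddr is supplied.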
    pub fn make_child_mapping(self: *const Self, pte: *PageTable.Entry, paddr: ?u64, perms: Perms, memory_type: MemoryType) !MappingHandle {
        const page_size = page_sizes[self.level - 1];
        const pmem = paddr orelse try common.init_data.bootmem.allocPhys(page_size);

        const result: MappingHandle = .{
            .level = self.level - 1,
            .memory_type = memory_type,
            .context = self.context,
            .perms = perms,
            .underlying = pte,
            .paddr = pmem,
        };

        pte.* = self.context.encode_mapping(result);

        return result;
    }

    pub fn child_domain(self: *const Self, vaddr: u64) StupidSlice {
        return self.context.domain(self.level - 1, vaddr);
    }
};

/// What a decoded PTE turned out to be.
pub const SomePteHandle = union(common.mm.paging.PTEType) {
    Mapping: MappingHandle,
    Table: TableHandle,
    Empty,
};

/// A raw (ptr, len) address range; not a real Zig slice.
pub const StupidSlice = struct {
    len: usize,
    ptr: usize,
};

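/// Fatal page-fault handler: log RIP, the register state, CR2 (the faulting
/// address), and the error code, then halt.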
pub fn page_fault_handler(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
    std.log.err("Page Fault @ 0x{x}, dying...", .{stack_frame.rip});
    arch.interrupts.print_regs(stack_frame.normalize());
    std.log.err("CR2 (faulting address): 0x{x:0>16}, Error Code: 0x{x:0>16}", .{ arch.registers.ControlRegisters.Cr2.read(), stack_frame.error_code });
    arch.instructions.die();
}
353}