Microkernel thing OS experiment (Zig ⚡)
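x86_64 paging for the kernel: the hardware page-table entry layout, a per-address-space Context wrapping CR3/CR4 state, and the table/mapping handles that plug into the generic paging code in common.mm.paging.
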
const arch = @import("../root.zig");
const common = @import("common");
const std = @import("std");
const Cr3 = arch.registers.ControlRegisters.Cr3;
const Cr4 = arch.registers.ControlRegisters.Cr4;
const idt = arch.interrupts.idt;
const Perms = common.mm.paging.Perms;

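/// One x86_64 page table: 512 eight-byte entries filling a 4 KiB page.
/// `Entry` mirrors the hardware PTE layout bit-for-bit as a packed u64.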
pub const PageTable = extern struct {
    entries: [512]Entry,

    pub const Entry = packed struct(u64) {
        present: bool,
        writable: bool,
        user_accessible: bool,
        write_through: bool,
        disable_cache: bool,
        accessed: bool,
        dirty: bool,
        huge: bool,
        global: bool,
        available_lo: u3, // bits 9-11: ignored by the MMU, free for OS use
        phys_addr: u40,
        available_hi: u11, // bits 52-62: also ignored by the MMU
        nx: bool,

        const Self = @This();

        pub fn getAddr(self: *const Self) u64 {
            // Widen to u64 before shifting: `phys_addr << 12` at u40 width
            // would overflow and drop the top bits of the address.
            return @as(u64, self.phys_addr) << 12;
        }

        pub fn setAddr(self: *Self, phys_addr: u64) void {
            const addr = phys_addr >> 12;
            self.phys_addr = @truncate(addr);
        }
    };
};

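/// Cacheability classes encodable in a PTE. Only the PCD/PWT bits are
/// used here, so write-combining (which needs the PAT) is not wired up yet.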
pub const MemoryType = enum {
    DeviceUncacheable,
    DeviceWriteCombining,
    MemoryWritethrough,
    MemoryWriteBack,
};

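/// Whether 5-level paging is active: CR4.LA57 is bit 12.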
pub fn detect_5level() bool {
    const bits: u64 = 1 << 12;
    return Cr4.read() & bits != 0;
}

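/// Per-address-space paging state: the raw CR3 value plus whether the
/// hierarchy is five levels deep (LA57). A minimal usage sketch, assuming
/// the bootloader already set up paging:
///
///     var ctx = Context.get_current();
///     const root = ctx.root_table(0);
///     _ = root.get_children();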
pub const Context = struct {
    cr3_val: u64,
    level5: bool,

    const Self = @This();
    pub fn apply(self: *Self) void {
        // Enable no-execute: set EFER.NXE (bit 11 of MSR 0xC0000080) so
        // the per-page NX bit is honored instead of being reserved.
        const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
        const efer_val = IA32_EFER.read() | (0b1 << 11);
        IA32_EFER.write(efer_val);

        // Set or clear CR4.LA57 (bit 12) to match this context's depth.
        // Hardware only allows LA57 to change while paging is disabled,
        // so in practice this must already match the current state.
        const cr4 = Cr4.read();
        const level5mask: u64 = 1 << 12;
        Cr4.write(if (self.level5) cr4 | level5mask else cr4 & ~level5mask);

        Cr3.write(self.cr3_val);
    }

    pub fn get_current() Context {
        return .{
            .cr3_val = Cr3.read(),
            .level5 = detect_5level(),
        };
    }

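    /// Leaf mappings are only created at level 0 (4K) or level 1 (2M);
    /// 1G pages are not used here.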
    pub fn can_map_at(_: *const Self, level: u3) bool {
        return level < 2;
    }

    // The parameter exists for parity with aarch64, which has two root
    // tables (TTBR0/TTBR1); x86_64 has a single root and ignores it.
    pub fn root_table(self: *Self, _: u64) TableHandle {
        return .{
            // Assumes the CR3 flag bits (low 12) are zero, so the raw
            // value is the root table's physical address.
            .paddr = self.cr3_val,
            .level = if (self.level5) 5 else 4,
            .context = self,
            .perms = .{
                .x = true,
                .w = true,
                .u = true,
            },
            .underlying = null,
        };
    }

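    /// Classify a raw PTE at `level`. Level-0 entries are always leaves:
    /// in a 4K PTE, bit 7 is PAT rather than the huge-page bit, so `huge`
    /// must not be read as "points at a table" there.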
    pub fn decode(self: *Self, pte: *PageTable.Entry, level: u3) SomePteHandle {
        if (!pte.present) {
            return .Empty;
        }
        if (!pte.huge and level != 0) {
            return .{ .Table = self.parse_table(pte, level) };
        }
        return .{ .Mapping = self.parse_mapping(pte, level) };
    }

    pub fn parse_mapping(self: *Self, pte: *PageTable.Entry, level: u3) MappingHandle {
        const memory_type = self.decode_memory_type(pte, level);
        return .{
            .context = self,
            .paddr = pte.getAddr(),
            .level = level,
            .memory_type = memory_type,
            .underlying = pte,
            .perms = .{
                .w = pte.writable,
                .x = !pte.nx,
                .u = pte.user_accessible,
            },
        };
    }

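    /// Recover the memory type from the PCD/PWT bits. The PAT index bit is
    /// ignored, which is fine as long as the PAT keeps its reset values.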
    pub fn decode_memory_type(_: *Self, pte: *PageTable.Entry, _: u3) ?MemoryType {
        return switch (pte.disable_cache) {
            true => .DeviceUncacheable,
            false => switch (pte.write_through) {
                true => .MemoryWritethrough,
                false => .MemoryWriteBack,
            },
        };
    }

    pub fn encode_memory_type(_: *Self, pte: *PageTable.Entry, mapping_handle: MappingHandle) void {
        switch (mapping_handle.memory_type.?) {
            .MemoryWritethrough => pte.write_through = true,
            .DeviceUncacheable => pte.disable_cache = true,
            .MemoryWriteBack => {},
            // Write-combining would need a PAT entry, which is not set up.
            .DeviceWriteCombining => @panic("bad memory type"),
        }
    }

    pub fn parse_table(self: *Self, pte: *PageTable.Entry, level: u3) TableHandle {
        return .{
            .context = self,
            .paddr = pte.getAddr(),
            .level = level,
            .underlying = pte,
            .perms = .{
                .w = pte.writable,
                .x = !pte.nx,
                .u = pte.user_accessible,
            },
        };
    }

    pub fn encode_mapping(self: *Self, mapping_handle: MappingHandle) PageTable.Entry {
        var pte = std.mem.zeroes(PageTable.Entry);
        pte.setAddr(mapping_handle.paddr);
        pte.present = true;
        if (mapping_handle.level != 0) {
            pte.huge = true;
        }

        pte.writable = mapping_handle.perms.w;
        pte.user_accessible = mapping_handle.perms.u;
        pte.nx = !mapping_handle.perms.x;

        self.encode_memory_type(&pte, mapping_handle);
        return pte;
    }

    pub fn encode_table(_: *Self, table_handle: TableHandle) PageTable.Entry {
        var pte = std.mem.zeroes(PageTable.Entry);
        pte.writable = table_handle.perms.w;
        pte.user_accessible = table_handle.perms.u;
        pte.nx = !table_handle.perms.x;
        pte.setAddr(table_handle.paddr);

        pte.present = true;
        pte.huge = false;

        return pte;
    }

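    /// Flush the TLB entry for a single virtual address on this core.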
    pub fn invalidate(_: *const Self, vaddr: u64) void {
        asm volatile (
            \\ invlpg (%[vaddr])
            :
            : [vaddr] "r" (vaddr),
            : .{ .memory = true });
    }

    pub fn domain(_: *const Self, level: u3, vaddr: u64) StupidSlice {
        return .{
            .ptr = vaddr & ~(page_sizes[level] - 1),
            .len = page_sizes[level],
        };
    }

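    /// Software walk from virtual to physical, for debugging. Assumes a
    /// 4-level hierarchy (an LA57 root would need one more index) and
    /// logs every entry it visits.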
    pub fn virt_to_phys(context: *Context, vaddr: usize) ?usize {
        const root = context.root_table(0).get_children();
        const indexes = [_]usize{
            (vaddr >> 39) & 0x1FF,
            (vaddr >> 30) & 0x1FF,
            (vaddr >> 21) & 0x1FF,
            (vaddr >> 12) & 0x1FF,
        };
        var pte_ptr = &root[indexes[0]];
        std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
        for (0..3) |i| {
            if (!pte_ptr.present) {
                return null;
            }
            // A PDPTE/PDE with the huge bit set maps a 1G/2M page directly;
            // stop descending and apply the larger offset mask.
            if (i >= 1 and pte_ptr.huge) {
                return pte_ptr.getAddr() + (vaddr & (page_sizes[3 - i] - 1));
            }
            const next_page_table = common.mm.physToHHDM(*PageTable, pte_ptr.getAddr());
            pte_ptr = &next_page_table.entries[indexes[i + 1]];
            std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
        }
        // The final (4K) entry has to be present too.
        if (!pte_ptr.present) {
            return null;
        }
        return pte_ptr.getAddr() + (vaddr & 0xFFF);
    }
};

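/// The 9-bit table index for `vaddr` at a given level: bits [12 + 9*level ..].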
fn idx_from_level(vaddr: u64, level: u6) u9 {
    const shamt = 12 + level * 9;
    return @truncate(vaddr >> shamt);
}

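/// Allocate one zeroed physical page from boot memory for use as a page
/// table, returning its physical address.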
pub fn make_page_table() !usize {
    const page_size = std.heap.pageSize();
    const paddr = try common.init_data.bootmem.allocPhys(page_size);
    const pt_ptr = common.mm.physToHHDM([*]u8, paddr);
    @memset(pt_ptr[0..page_size], 0);
    return paddr;
}

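/// Bytes covered by a single entry at each level of the hierarchy.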
pub const page_sizes = [_]usize{
    0x1000, // 4K
    0x200000, // 2M
    0x40000000, // 1G
    0x8000000000, // 512G
    0x1000000000000, // 256T
};

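/// A decoded leaf entry: a physical range mapped at `level` with `perms`.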
const MappingHandle = struct {
    paddr: u64,
    level: u3,
    memory_type: ?MemoryType,
    context: *Context,
    perms: Perms,
    underlying: *PageTable.Entry,
};

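/// A handle to one table in the hierarchy. `underlying` is the parent
/// entry pointing at this table, or null for the root.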
pub const TableHandle = struct {
    paddr: u64,
    level: u3,
    context: *Context,
    perms: Perms,
    underlying: ?*PageTable.Entry,

    const Self = @This();
    pub fn get_children(self: *const Self) []PageTable.Entry {
        const pt = common.mm.physToHHDM(*PageTable, self.paddr);
        return pt.entries[0..];
    }

    pub fn skip_to(self: *const Self, vaddr: u64) []PageTable.Entry {
        return self.get_children()[idx_from_level(vaddr, self.level - 1)..];
    }

    pub fn decode_child(self: *const Self, pte: *PageTable.Entry) SomePteHandle {
        return self.context.decode(pte, self.level - 1);
    }

    pub fn addPerms(self: *const Self, perms: Perms) void {
        if (perms.x) {
            self.underlying.?.nx = false;
        }
        if (perms.w) {
            self.underlying.?.writable = true;
        }
        if (perms.u) {
            self.underlying.?.user_accessible = true;
        }
    }

    pub fn make_child_table(self: *const Self, pte: *PageTable.Entry, perms: Perms) !TableHandle {
        const pmem = try make_page_table();

        const result: TableHandle = .{
            .paddr = pmem,
            .context = self.context,
            .level = self.level - 1,
            .perms = perms,
            .underlying = pte,
        };
        pte.* = self.context.encode_table(result);

        return result;
    }

    pub fn make_child_mapping(self: *const Self, pte: *PageTable.Entry, paddr: ?u64, perms: Perms, memory_type: MemoryType) !MappingHandle {
        const page_size = page_sizes[self.level - 1];
        const pmem = paddr orelse try common.init_data.bootmem.allocPhys(page_size);

        const result: MappingHandle = .{
            .level = self.level - 1,
            .memory_type = memory_type,
            .context = self.context,
            .perms = perms,
            .underlying = pte,
            .paddr = pmem,
        };

        pte.* = self.context.encode_mapping(result);

        return result;
    }

    pub fn child_domain(self: *const Self, vaddr: u64) StupidSlice {
        return self.context.domain(self.level - 1, vaddr);
    }
};

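/// Result of decoding a PTE: a leaf mapping, a child table, or nothing.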
pub const SomePteHandle = union(common.mm.paging.PTEType) {
    Mapping: MappingHandle,
    Table: TableHandle,
    Empty,
};

pub const StupidSlice = struct {
    len: usize,
    ptr: usize,
};

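/// Last-resort page fault handler: log and halt. The faulting address
/// lives in CR2; it is not logged here yet.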
pub fn page_fault_handler(stack_frame: *idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
    std.log.err("Page Fault @ 0x{x}, dying...", .{stack_frame.rip});
    arch.instructions.die();
}
337}