Microkernel OS experiment (Zig ⚡)
const arch = @import("arch");
const std = @import("std");
const TableHandle = arch.mm.paging.TableHandle;
const MemoryType = arch.mm.paging.MemoryType;

pub const Perms = struct {
    writable: bool,
    executable: bool,
    userspace_accessible: bool = false,

    const Self = @This();

    /// Verify that the current permissions are a superset of the provided ones
    pub fn allows(self: Self, other: Self) bool {
        if (!self.writable and other.writable) {
            return false;
        }
        if (!self.executable and other.executable) {
            return false;
        }
        if (!self.userspace_accessible and other.userspace_accessible) {
            return false;
        }
        return true;
    }

    /// OR two permissions together, returning their union
    pub fn addPerms(self: Self, other: Self) Self {
        return .{
            .writable = self.writable or other.writable,
            .executable = self.executable or other.executable,
            .userspace_accessible = self.userspace_accessible or other.userspace_accessible,
        };
    }
};

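// Illustrative sketch: a small test showing how `allows` and `addPerms`
// compose. It uses only `std.testing` from the `std` import above and is
// only built under `zig test`; the permission values are made up for the
// example.
test "Perms: addPerms widens, allows checks the superset relation" {
    const ro: Perms = .{ .writable = false, .executable = false };
    const rw: Perms = .{ .writable = true, .executable = false };

    // A read-only entry does not allow a writable request...
    try std.testing.expect(!ro.allows(rw));

    // ...but after OR-ing the writable permission in, it does.
    const widened = ro.addPerms(rw);
    try std.testing.expect(widened.allows(rw));
    try std.testing.expect(widened.allows(ro));
}
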
pub const PTEType = enum { Mapping, Table, Empty };

/// Map the physical range `[paddr, paddr + size)` at virtual address `vaddr`
/// with the given permissions and memory type.
pub fn mapPhys(args: struct {
    vaddr: usize,
    paddr: usize,
    size: usize,
    perms: Perms,
    memory_type: MemoryType,
}) !void {
    const root = arch.mm.paging.root_table(args.vaddr);
    var vaddr = args.vaddr;
    var paddr = args.paddr;
    var size = args.size;
    try mapPageImpl(&vaddr, &paddr, &size, root, args.perms, args.memory_type);
}

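// Illustrative sketch: mapping a hypothetical MMIO window with `mapPhys`.
// The addresses and size are placeholders (assuming a 64-bit target), and the
// concrete `MemoryType` variants are arch-defined and not shown in this file,
// so the memory type is taken as a parameter instead of being named here.
fn exampleMapMmio(memory_type: MemoryType) !void {
    try mapPhys(.{
        .vaddr = 0xFFFF_8000_1000_0000, // hypothetical kernel-space virtual address
        .paddr = 0xFE00_0000, // hypothetical device physical address
        .size = 0x1000,
        .perms = .{ .writable = true, .executable = false },
        .memory_type = memory_type,
    });
}
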
/// Recursive worker for `mapPhys`: walks `table`, creating intermediate tables
/// and leaf mappings, advancing `vaddr`/`paddr` and shrinking `size` as pages
/// are mapped. When `paddr` is null, child mappings are created without an
/// explicit physical address.
fn mapPageImpl(
    vaddr: *usize,
    paddr: ?*usize,
    size: *usize,
    table: TableHandle,
    perms: Perms,
    memory_type: MemoryType,
) !void {
    // 1. Get a slice of every child entry from the target address onwards.
    const children = table.skip_to(vaddr.*);

    // 2. Decode each PTE into its type (Mapping, Table, Empty):
    //    - Mapping: the range is already mapped, so fail with AlreadyPresent.
    //    - Table: recurse into the existing child table, then widen its
    //      permissions if they do not already allow the requested ones.
    //    - Empty: if the child's whole domain fits the remaining range and the
    //      architecture can map at this level with the required alignment,
    //      create a leaf mapping and advance vaddr/paddr/size; otherwise create
    //      a new child table and recurse into it.
    for (children) |*child| {
        switch (table.decode_child(child)) {
            .Mapping => return error.AlreadyPresent,
            .Table => |*tbl| {
                try mapPageImpl(vaddr, paddr, size, tbl.*, perms, memory_type);
                if (!tbl.perms.allows(perms)) {
                    // The existing child table is not permissive enough for
                    // the new mapping: widen its permissions and flush the
                    // stale translation for this address.
                    tbl.addPerms(perms);
                    arch.mm.paging.invalidate(vaddr.*);
                }
            },
            .Empty => {
                const domain = table.child_domain(vaddr.*);
                // Map at this level only if the child's whole domain starts at
                // our target, fits in the remaining size, and the architecture
                // supports a suitably aligned mapping here.
                if (domain.ptr == vaddr.* and
                    domain.len <= size.* and
                    arch.mm.paging.can_map_at(table.level - 1) and
                    is_aligned(vaddr.*, paddr, table.level - 1))
                {
                    _ = try table.make_child_mapping(child, if (paddr) |p| p.* else null, perms, memory_type);
                    const step = domain.len;
                    if (step >= size.*) {
                        size.* = 0;
                        return;
                    } else {
                        size.* -= step;
                        vaddr.* += step;
                        if (paddr) |p| {
                            p.* += step;
                        }
                    }
                } else {
                    // Otherwise insert an intermediate table and recurse.
                    const tbl = try table.make_child_table(child, perms);
                    try mapPageImpl(vaddr, paddr, size, tbl, perms, memory_type);
                }
            },
        }
        if (size.* == 0) return;
    }
}

fn is_aligned(vaddr: usize, paddr: ?*usize, level: u3) bool {
    if (!std.mem.isAligned(vaddr, arch.mm.paging.page_sizes[level])) {
        return false;
    }

    if (paddr) |p| {
        return std.mem.isAligned(p.*, arch.mm.paging.page_sizes[level]);
    }

    return true;
}
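
// Illustrative sketch: alignment checks against the arch-reported page sizes.
// Assumes `arch.mm.paging.page_sizes[0]` is the smallest page size and is
// greater than 1; only built under `zig test`.
test "is_aligned: virtual and physical alignment" {
    const page = arch.mm.paging.page_sizes[0];

    // With no physical address, only the virtual address is checked.
    try std.testing.expect(is_aligned(page, null, 0));
    try std.testing.expect(!is_aligned(page + 1, null, 0));

    // When a physical address is supplied, it must be aligned as well.
    var paddr: usize = page * 2;
    try std.testing.expect(is_aligned(page, &paddr, 0));
    paddr += 1;
    try std.testing.expect(!is_aligned(page, &paddr, 0));
}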