Microkernel thing OS experiment (Zig ⚡)

overhaul paging API

pci.express fce9655d 3590420c

verified
Changed files
+250 -183
+13 -7
components/ukernel/arch/amd64/boot.zig
···
const StandardGdt = arch.structures.gdt.StandardGdt;
const Tss = arch.structures.tss.Tss;
pub const limine_requests = struct {
export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{};
export var end_marker: limine.RequestsEndMarker linksection(".limine_reqs_end") = .{};
···
log.info("Setting up scheduling...", .{});
initApic() catch |err| {
log.err("Failed to set up APIC! {}", .{err});
@panic("apic");
···
.size = 0x1000,
.memory_type = .MemoryWriteBack,
.perms = .{
- .executable = false,
- .userspace_accessible = true,
- .writable = true,
},
}) catch @panic("couldn't map user stack");
- const entry = common.loadRootTask() catch |err| {
log.err("Couldn't load the root task! {}", .{err});
@panic("ggz");
};
···
.size = 0x1000,
.memory_type = .DeviceUncacheable,
.perms = .{
- .executable = false,
- .userspace_accessible = false,
- .writable = true,
},
});
break :blk .{ .xapic = apic_base };
},
···
const StandardGdt = arch.structures.gdt.StandardGdt;
const Tss = arch.structures.tss.Tss;
+ var pg_ctx: arch.mm.paging.Context = undefined;
+
pub const limine_requests = struct {
export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{};
export var end_marker: limine.RequestsEndMarker linksection(".limine_reqs_end") = .{};
···
log.info("Setting up scheduling...", .{});
+ pg_ctx = arch.mm.paging.Context.get_current();
+
initApic() catch |err| {
log.err("Failed to set up APIC! {}", .{err});
@panic("apic");
···
.size = 0x1000,
.memory_type = .MemoryWriteBack,
.perms = .{
+ .x = false,
+ .u = true,
+ .w = true,
},
+ .context = &pg_ctx,
}) catch @panic("couldn't map user stack");
+ const entry = common.loadRootTask(&pg_ctx) catch |err| {
log.err("Couldn't load the root task! {}", .{err});
@panic("ggz");
};
···
.size = 0x1000,
.memory_type = .DeviceUncacheable,
.perms = .{
+ .x = false,
+ .u = false,
+ .w = true,
},
+ .context = &pg_ctx,
});
break :blk .{ .xapic = apic_base };
},
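Note: after this change every mapping call threads an explicit paging context instead of letting the arch layer read CR3 on its own. A minimal sketch of the new calling convention, assembled from the hunk above (the target function name and the .vaddr/user_stack_page values are assumptions here, since those lines are collapsed behind ···):

// Sketch only, not part of this commit. `user_stack_page` and `.vaddr` are hypothetical stand-ins.
pg_ctx = arch.mm.paging.Context.get_current();
common.mm.paging.map(.{
    .vaddr = user_stack_page,
    .size = 0x1000,
    .memory_type = .MemoryWriteBack,
    .perms = .{ .x = false, .u = true, .w = true },
    .context = &pg_ctx, // the context used to be implicit (read from CR3 inside the arch code)
}) catch @panic("couldn't map user stack");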
+207 -155
components/ukernel/arch/amd64/mm/paging.zig
···
- const common = @import("common");
const arch = @import("../root.zig");
const std = @import("std");
- const physToVirt = common.mm.physToHHDM;
const Perms = common.mm.paging.Perms;
-
- pub const page_sizes = [_]usize{
- 0x1000, // 4K
- 0x200000, // 2M
- 0x40000000, // 1G
- 0x8000000000, // 512G
- 0x1000000000000, // 256T
- };
pub const PageTable = extern struct {
entries: [512]Entry,
···
};
};
- fn extract_index_from_vaddr(vaddr: u64, level: u6) u9 {
- const shamt = 12 + level * 9;
- return @truncate(vaddr >> shamt);
}
- pub const TypedPTE = union(common.mm.paging.PTEType) {
- Mapping: MappingHandle,
- Table: TableHandle,
- Empty,
const Self = @This();
- pub fn decode(pte: *PageTable.Entry, level: u3) Self {
if (!pte.present) {
return .Empty;
}
if (!pte.huge and level != 0) {
- return .{ .Table = decode_table(pte, level) };
}
- return .{ .Mapping = decode_mapping(pte, level) };
}
- pub fn decode_table(pte: *PageTable.Entry, level: u3) TableHandle {
return .{
- .phys_addr = pte.getAddr(),
.level = level,
.underlying = pte,
.perms = .{
- .writable = pte.writable,
- .executable = !pte.nx,
- .userspace_accessible = pte.user_accessible,
},
};
}
- pub fn decode_mapping(pte: *PageTable.Entry, level: u3) MappingHandle {
return .{
- .phys_addr = pte.getAddr(),
.level = level,
- // TODO: memory types
- .memory_type = null,
.underlying = pte,
.perms = .{
- .writable = pte.writable,
- .executable = !pte.nx,
- .userspace_accessible = pte.user_accessible,
},
};
}
};
- pub const MappingHandle = struct {
- phys_addr: usize,
level: u3,
memory_type: ?MemoryType,
perms: Perms,
underlying: *PageTable.Entry,
};
pub const TableHandle = struct {
- phys_addr: usize,
level: u3,
perms: Perms,
underlying: ?*PageTable.Entry,
const Self = @This();
-
- // Get the child entries of this page table
pub fn get_children(self: *const Self) []PageTable.Entry {
- const page_table = physToVirt(*PageTable, self.phys_addr);
- return page_table.entries[0..];
}
- // Get children from the position holding the table and on
- pub fn skip_to(self: *const Self, vaddr: usize) []PageTable.Entry {
- return self.get_children()[extract_index_from_vaddr(vaddr, self.level - 1)..];
}
- // Decode child table given an entry
- pub fn decode_child(self: *const Self, pte: *PageTable.Entry) TypedPTE {
- return TypedPTE.decode(pte, self.level - 1);
}
pub fn addPerms(self: *const Self, perms: Perms) void {
- if (perms.executable) {
self.underlying.?.nx = false;
}
- if (perms.writable) {
self.underlying.?.writable = true;
}
- if (perms.userspace_accessible) {
self.underlying.?.user_accessible = true;
}
}
- pub fn child_domain(self: *const Self, vaddr: usize) UntypedSlice {
- return domain(vaddr, self.level - 1);
- }
-
pub fn make_child_table(self: *const Self, pte: *PageTable.Entry, perms: Perms) !TableHandle {
const pmem = try make_page_table();
const result: TableHandle = .{
- .phys_addr = pmem,
.level = self.level - 1,
.perms = perms,
.underlying = pte,
};
- pte.* = encode_table(result);
return result;
}
- pub fn make_child_mapping(
- self: *const Self,
- pte: *PageTable.Entry,
- paddr: ?usize,
- perms: Perms,
- memory_type: MemoryType,
- ) !MappingHandle {
const page_size = page_sizes[self.level - 1];
const pmem = paddr orelse try common.init_data.bootmem.allocPhys(page_size);
const result: MappingHandle = .{
.level = self.level - 1,
.memory_type = memory_type,
.perms = perms,
.underlying = pte,
- .phys_addr = pmem,
};
- pte.* = encode_mapping(result);
return result;
}
- };
- pub fn root_table(vaddr: usize) TableHandle {
- _ = vaddr;
- const cr3_val = arch.registers.ControlRegisters.Cr3.read() & 0xFFFF_FFFF_FFFF_F000;
- return .{
- .phys_addr = cr3_val,
- // TODO: detect and support 5 level paging!
- .level = 4,
- .perms = .{
- .executable = true,
- .writable = true,
- },
- .underlying = null,
- };
- }
-
- fn encode_table(pte_handle: TableHandle) PageTable.Entry {
- var pte = std.mem.zeroes(PageTable.Entry);
-
- pte.setAddr(pte_handle.phys_addr);
- pte.writable = pte_handle.perms.writable;
- pte.user_accessible = pte_handle.perms.userspace_accessible;
- pte.nx = !pte_handle.perms.executable;
- pte.present = true;
- pte.huge = false;
-
- return pte;
- }
-
- fn encode_mapping(pte_handle: MappingHandle) PageTable.Entry {
- var pte = std.mem.zeroes(PageTable.Entry);
-
- pte.setAddr(pte_handle.phys_addr);
- pte.present = true;
-
- if (pte_handle.level != 0) {
- pte.huge = true;
}
-
- pte.writable = pte_handle.perms.writable;
- pte.user_accessible = pte_handle.perms.userspace_accessible;
- pte.nx = !pte_handle.perms.executable;
-
- encode_memory_type(&pte, pte_handle);
-
- return pte;
- }
-
- fn encode_memory_type(pte: *PageTable.Entry, pte_handle: MappingHandle) void {
- const mt = pte_handle.memory_type orelse @panic("Unknown memory type");
-
- // TODO: Page Attribute Table
- switch (mt) {
- .MemoryWritethrough => pte.write_through = true,
- .DeviceUncacheable => pte.disable_cache = true,
- .MemoryWriteBack => {},
- else => @panic("Cannot set memory type"),
- }
- }
- /// Returns physical address
- fn make_page_table() !usize {
- const pt_phys = try common.init_data.bootmem.allocPhys(std.heap.pageSize());
- const pt = physToVirt([*]u8, pt_phys);
- @memset(pt[0..std.heap.pageSize()], 0x00);
- return pt_phys;
- }
- pub fn invalidate(vaddr: u64) void {
- asm volatile (
- \\ invlpg (%[vaddr])
- :
- : [vaddr] "r" (vaddr),
- : .{ .memory = true });
- }
-
- const UntypedSlice = struct {
len: usize,
ptr: usize,
};
-
- pub fn domain(vaddr: usize, level: u3) UntypedSlice {
- return .{
- .len = page_sizes[level],
- .ptr = vaddr & ~(page_sizes[level] - 1),
- };
- }
-
- pub const MemoryType = enum {
- DeviceUncacheable,
- DeviceWriteCombining,
- MemoryWritethrough,
- MemoryWriteBack,
- };
-
- pub fn can_map_at(level: u3) bool {
- return level < 2;
- }
···
const arch = @import("../root.zig");
+ const common = @import("common");
const std = @import("std");
+ const Cr3 = arch.registers.ControlRegisters.Cr3;
+ const Cr4 = arch.registers.ControlRegisters.Cr4;
const Perms = common.mm.paging.Perms;
pub const PageTable = extern struct {
entries: [512]Entry,
···
};
};
+ pub const MemoryType = enum {
+ DeviceUncacheable,
+ DeviceWriteCombining,
+ MemoryWritethrough,
+ MemoryWriteBack,
+ };
+
+ pub fn detect_5level() bool {
+ const bits: u64 = 1 << 12;
+ return Cr4.read() & bits != 0;
}
+ pub const Context = struct {
+ cr3_val: u64,
+ level5: bool,
const Self = @This();
+ pub fn apply(self: *Self) void {
+ // NX Enable
+ const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
+ const efer_val = IA32_EFER.read() | (0b1 << 11);
+ IA32_EFER.write(efer_val);
+
+ // Set the level 5 bit accordingly
+ const cr4 = Cr4.read();
+ const level5mask: u64 = 1 << 12;
+ Cr4.write(if (self.level5) cr4 | level5mask else cr4 & ~level5mask);
+
+ Cr3.write(self.cr3_val);
+ }
+ pub fn get_current() Context {
+ return .{
+ .cr3_val = Cr3.read(),
+ .level5 = detect_5level(),
+ };
+ }
+
+ pub fn can_map_at(_: *const Self, level: u3) bool {
+ return level < 2;
+ }
+
+ // We need the parameter because aarch64 has 2 root page tables
+ pub fn root_table(self: *Self, _: u64) TableHandle {
+ return .{
+ .paddr = self.cr3_val,
+ .level = if (self.level5) 5 else 4,
+ .context = self,
+ .perms = .{
+ .x = true,
+ .w = true,
+ .u = true,
+ },
+ .underlying = null,
+ };
+ }
+
+ pub fn decode(self: *Self, pte: *PageTable.Entry, level: u3) SomePteHandle {
if (!pte.present) {
return .Empty;
}
if (!pte.huge and level != 0) {
+ return .{ .Table = self.parse_table(pte, level) };
}
+ return .{ .Mapping = self.parse_mapping(pte, level) };
}
+ pub fn parse_mapping(self: *Self, pte: *PageTable.Entry, level: u3) MappingHandle {
+ const memory_type = self.decode_memory_type(pte, level);
return .{
+ .context = self,
+ .paddr = pte.getAddr(),
.level = level,
+ .memory_type = memory_type,
.underlying = pte,
.perms = .{
+ .w = pte.writable,
+ .x = !pte.nx,
+ .u = pte.user_accessible,
+ },
+ };
+ }
+
+ pub fn decode_memory_type(_: *Self, pte: *PageTable.Entry, _: u3) ?MemoryType {
+ return switch (pte.disable_cache) {
+ true => .DeviceUncacheable,
+ false => switch (pte.write_through) {
+ true => .MemoryWritethrough,
+ false => .MemoryWriteBack,
},
};
}
+ pub fn encode_memory_type(_: *Self, pte: *PageTable.Entry, mapping_handle: MappingHandle) void {
+ switch (mapping_handle.memory_type.?) {
+ .MemoryWritethrough => pte.write_through = true,
+ .DeviceUncacheable => pte.disable_cache = true,
+ .MemoryWriteBack => {},
+ else => @panic("bad memory type"),
+ }
+ }
+
+ pub fn parse_table(self: *Self, pte: *PageTable.Entry, level: u3) TableHandle {
return .{
+ .context = self,
+ .paddr = pte.getAddr(),
.level = level,
.underlying = pte,
.perms = .{
+ .w = pte.writable,
+ .x = !pte.nx,
+ .u = pte.user_accessible,
},
};
}
+
+ pub fn encode_mapping(self: *Self, mapping_handle: MappingHandle) PageTable.Entry {
+ var pte = std.mem.zeroes(PageTable.Entry);
+ pte.setAddr(mapping_handle.paddr);
+ pte.present = true;
+ if (mapping_handle.level != 0) {
+ pte.huge = true;
+ }
+
+ pte.writable = mapping_handle.perms.w;
+ pte.user_accessible = mapping_handle.perms.u;
+ pte.nx = !mapping_handle.perms.x;
+
+ self.encode_memory_type(&pte, mapping_handle);
+ return pte;
+ }
+
+ pub fn encode_table(_: *Self, table_handle: TableHandle) PageTable.Entry {
+ var pte = std.mem.zeroes(PageTable.Entry);
+ pte.writable = table_handle.perms.w;
+ pte.user_accessible = table_handle.perms.u;
+ pte.nx = !table_handle.perms.x;
+ pte.setAddr(table_handle.paddr);
+
+ pte.present = true;
+ pte.huge = false;
+
+ return pte;
+ }
+
+ pub fn invalidate(_: *const Self, vaddr: u64) void {
+ asm volatile (
+ \\ invlpg (%[vaddr])
+ :
+ : [vaddr] "r" (vaddr),
+ : .{ .memory = true });
+ }
+
+ pub fn domain(_: *const Self, level: u3, vaddr: u64) StupidSlice {
+ return .{
+ .ptr = vaddr & ~(page_sizes[level] - 1),
+ .len = page_sizes[level],
+ };
+ }
+
+ pub fn virt_to_phys(context: *Context, vaddr: usize) ?usize {
+ const root = context.root_table(0).get_children();
+ const indexes = [_]usize{
+ (vaddr >> 39) & 0x1FF,
+ (vaddr >> 30) & 0x1FF,
+ (vaddr >> 21) & 0x1FF,
+ (vaddr >> 12) & 0x1FF,
+ };
+ var pte_ptr = &root[indexes[0]];
+ std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
+ for (0..3) |i| {
+ if (!pte_ptr.present) {
+ return null;
+ }
+ const next_page_table = common.mm.physToHHDM(*PageTable, pte_ptr.getAddr());
+ pte_ptr = &next_page_table.entries[indexes[i + 1]];
+ std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
+ }
+ return pte_ptr.getAddr() + (vaddr & 0xFFF);
+ }
};
+ fn idx_from_level(vaddr: u64, level: u6) u9 {
+ const shamt = 12 + level * 9;
+ return @truncate(vaddr >> shamt);
+ }
+
+ pub fn make_page_table() !usize {
+ const page_size = std.heap.pageSize();
+ const paddr = try common.init_data.bootmem.allocPhys(page_size);
+ const pt_ptr = common.mm.physToHHDM([*]u8, paddr);
+ @memset(pt_ptr[0..page_size], 0);
+ return paddr;
+ }
+
+ pub const page_sizes = [_]usize{
+ 0x1000, // 4K
+ 0x200000, // 2M
+ 0x40000000, // 1G
+ 0x8000000000, // 512G
+ 0x1000000000000, // 256T
+ };
+
+ const MappingHandle = struct {
+ paddr: u64,
level: u3,
memory_type: ?MemoryType,
+ context: *Context,
perms: Perms,
underlying: *PageTable.Entry,
};
pub const TableHandle = struct {
+ paddr: u64,
level: u3,
+ context: *Context,
perms: Perms,
underlying: ?*PageTable.Entry,
const Self = @This();
pub fn get_children(self: *const Self) []PageTable.Entry {
+ const pt = common.mm.physToHHDM(*PageTable, self.paddr);
+ return pt.entries[0..];
}
+ pub fn skip_to(self: *const Self, vaddr: u64) []PageTable.Entry {
+ return self.get_children()[idx_from_level(vaddr, self.level - 1)..];
}
+ pub fn decode_child(self: *const Self, pte: *PageTable.Entry) SomePteHandle {
+ return self.context.decode(pte, self.level - 1);
}
pub fn addPerms(self: *const Self, perms: Perms) void {
+ if (perms.x) {
self.underlying.?.nx = false;
}
+ if (perms.w) {
self.underlying.?.writable = true;
}
+ if (perms.u) {
self.underlying.?.user_accessible = true;
}
}
pub fn make_child_table(self: *const Self, pte: *PageTable.Entry, perms: Perms) !TableHandle {
const pmem = try make_page_table();
const result: TableHandle = .{
+ .paddr = pmem,
+ .context = self.context,
.level = self.level - 1,
.perms = perms,
.underlying = pte,
};
+ pte.* = self.context.encode_table(result);
return result;
}
+ pub fn make_child_mapping(self: *const Self, pte: *PageTable.Entry, paddr: ?u64, perms: Perms, memory_type: MemoryType) !MappingHandle {
const page_size = page_sizes[self.level - 1];
const pmem = paddr orelse try common.init_data.bootmem.allocPhys(page_size);
const result: MappingHandle = .{
.level = self.level - 1,
.memory_type = memory_type,
+ .context = self.context,
.perms = perms,
.underlying = pte,
+ .paddr = pmem,
};
+ pte.* = self.context.encode_mapping(result);
return result;
}
+ pub fn child_domain(self: *const Self, vaddr: u64) StupidSlice {
+ return self.context.domain(self.level - 1, vaddr);
}
+ };
+ pub const SomePteHandle = union(common.mm.paging.PTEType) {
+ Mapping: MappingHandle,
+ Table: TableHandle,
+ Empty,
+ };
+ pub const StupidSlice = struct {
len: usize,
ptr: usize,
};
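Note: the new Context struct bundles the CR3 value and the 5-level flag, so a single snapshot can drive table walks, TLB invalidation, and the debug translation added here. A usage sketch (not part of this commit; some_vaddr is a placeholder):

// Sketch only, assuming the Context API added in this file.
var ctx = Context.get_current();            // snapshot CR3 + the CR4 LA57 bit
const root = ctx.root_table(0);             // TableHandle for the root table (argument ignored on amd64)
const entries = root.get_children();        // 512 raw entries, reached through the HHDM
std.log.debug("root table has {} entries", .{entries.len});
if (ctx.virt_to_phys(some_vaddr)) |paddr| { // walks four levels, null on a non-present entry
    std.log.debug("0x{x} -> 0x{x}", .{ some_vaddr, paddr });
}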
+6 -4
components/ukernel/common/loader.zig
···
const common = @import("root.zig");
const paging = common.mm.paging;
const std = @import("std");
const elf = std.elf;
const log = std.log.scoped(.elf_loader);
// Load root task, return the entry point
- pub fn loadRootTask() !usize {
const root_task = common.init_data.root_task;
const hdr = blk: {
const hdr: *elf.Elf64_Ehdr = @ptrCast(root_task);
···
.size = memsz_pages,
.memory_type = .MemoryWriteBack,
.perms = .{
- .executable = entry.p_flags & elf.PF_X > 0,
- .writable = entry.p_flags & elf.PF_W > 0,
- .userspace_accessible = true,
},
});
// 2. Copy filesz bytes from offset to this new page
···
const common = @import("root.zig");
+ const arch = @import("arch");
const paging = common.mm.paging;
const std = @import("std");
const elf = std.elf;
const log = std.log.scoped(.elf_loader);
// Load root task, return the entry point
+ pub fn loadRootTask(context: *arch.mm.paging.Context) !usize {
const root_task = common.init_data.root_task;
const hdr = blk: {
const hdr: *elf.Elf64_Ehdr = @ptrCast(root_task);
···
.size = memsz_pages,
.memory_type = .MemoryWriteBack,
.perms = .{
+ .x = entry.p_flags & elf.PF_X > 0,
+ .w = entry.p_flags & elf.PF_W > 0,
+ .u = true,
},
+ .context = context,
});
// 2. Copy filesz bytes from offset to this new page
+24 -17
components/ukernel/common/mm/paging.zig
···
const std = @import("std");
const TableHandle = arch.mm.paging.TableHandle;
const MemoryType = arch.mm.paging.MemoryType;
pub const Perms = struct {
- writable: bool,
- executable: bool,
- userspace_accessible: bool = false,
const Self = @This();
/// Verify that the current permissions are a superset of the provided ones
pub fn allows(self: Self, other: Self) bool {
- if (!self.writable and other.writable) {
return false;
}
- if (!self.executable and other.executable) {
return false;
}
- if (!self.userspace_accessible and other.userspace_accessible) {
return false;
}
return true;
···
/// OR two permissions
pub fn addPerms(self: Self, other: Self) Self {
return .{
- .writable = self.writable or other.writable,
- .executable = self.executable or other.executable,
- .userspace = self.userspace_accessible or other.userspace_accessible,
};
}
};
···
size: usize,
perms: Perms,
memory_type: MemoryType,
}) !void {
- const root = arch.mm.paging.root_table(args.vaddr);
var vaddr = args.vaddr;
var paddr = args.paddr;
var size = args.size;
- try mapPageImpl(&vaddr, &paddr, &size, root, args.perms, args.memory_type);
}
pub fn map(args: struct {
···
size: usize,
perms: Perms,
memory_type: MemoryType,
}) !void {
- const root = arch.mm.paging.root_table(args.vaddr);
var vaddr = args.vaddr;
var size = args.size;
- try mapPageImpl(&vaddr, null, &size, root, args.perms, args.memory_type);
}
fn mapPageImpl(
···
table: TableHandle,
perms: Perms,
memory_type: MemoryType,
) !void {
// 1. Get slice of every child from the target forwards
const children = table.skip_to(vaddr.*);
···
switch (table.decode_child(child)) {
.Mapping => return error.AlreadyPresent,
.Table => |*tbl| {
- try mapPageImpl(vaddr, paddr, size, tbl.*, perms, memory_type);
if (!tbl.perms.allows(perms)) {
tbl.addPerms(perms);
- arch.mm.paging.invalidate(vaddr.*);
}
},
.Empty => {
const domain = table.child_domain(vaddr.*);
- if (domain.ptr == vaddr.* and domain.len <= size.* and arch.mm.paging.can_map_at(table.level - 1) and is_aligned(vaddr.*, paddr, table.level - 1)) {
// Make child mapping etc
_ = try table.make_child_mapping(child, if (paddr) |p| p.* else null, perms, memory_type);
const step = domain.len;
···
}
} else {
const tbl = try table.make_child_table(child, perms);
- try mapPageImpl(vaddr, paddr, size, tbl, perms, memory_type);
}
},
}
···
const std = @import("std");
const TableHandle = arch.mm.paging.TableHandle;
const MemoryType = arch.mm.paging.MemoryType;
+ const Context = arch.mm.paging.Context;
pub const Perms = struct {
+ /// Writable
+ w: bool,
+ /// Executable
+ x: bool,
+ /// Userspace Accessible
+ u: bool = false,
const Self = @This();
/// Verify that the current permissions are a superset of the provided ones
pub fn allows(self: Self, other: Self) bool {
+ if (!self.w and other.w) {
return false;
}
+ if (!self.x and other.x) {
return false;
}
+ if (!self.u and other.u) {
return false;
}
return true;
···
/// OR two permissions
pub fn addPerms(self: Self, other: Self) Self {
return .{
+ .w = self.w or other.w,
+ .x = self.x or other.x,
+ .u = self.u or other.u,
};
}
};
···
size: usize,
perms: Perms,
memory_type: MemoryType,
+ context: *Context,
}) !void {
+ const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
var paddr = args.paddr;
var size = args.size;
+ try mapPageImpl(&vaddr, &paddr, &size, root, args.perms, args.memory_type, args.context);
}
pub fn map(args: struct {
···
size: usize,
perms: Perms,
memory_type: MemoryType,
+ context: *Context,
}) !void {
+ const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
var size = args.size;
+ try mapPageImpl(&vaddr, null, &size, root, args.perms, args.memory_type, args.context);
}
fn mapPageImpl(
···
table: TableHandle,
perms: Perms,
memory_type: MemoryType,
+ context: *Context,
) !void {
// 1. Get slice of every child from the target forwards
const children = table.skip_to(vaddr.*);
···
switch (table.decode_child(child)) {
.Mapping => return error.AlreadyPresent,
.Table => |*tbl| {
+ try mapPageImpl(vaddr, paddr, size, tbl.*, perms, memory_type, context);
if (!tbl.perms.allows(perms)) {
tbl.addPerms(perms);
+ context.invalidate(vaddr.*);
}
},
.Empty => {
const domain = table.child_domain(vaddr.*);
+ if (domain.ptr == vaddr.* and domain.len <= size.* and context.can_map_at(table.level - 1) and is_aligned(vaddr.*, paddr, table.level - 1)) {
// Make child mapping etc
_ = try table.make_child_mapping(child, if (paddr) |p| p.* else null, perms, memory_type);
const step = domain.len;
···
}
} else {
const tbl = try table.make_child_table(child, perms);
+ try mapPageImpl(vaddr, paddr, size, tbl, perms, memory_type, context);
}
},
}