const common = @import("root.zig");
const arch = @import("arch");
const paging = common.mm.paging;
const std = @import("std");
const elf = std.elf;
const log = std.log.scoped(.elf_loader);

/// Load the root task ELF image into the given address space and return its entry point.
pub fn loadElf(context: *arch.mm.paging.Context, task_slice: []align(4096) u8) !usize {
    const hdr = blk: {
        const ehdr: *const elf.Elf64_Ehdr = @ptrCast(task_slice.ptr);
        break :blk elf.Header.init(ehdr.*, .little);
    };

    var iter = hdr.iterateProgramHeadersBuffer(task_slice);
    while (try iter.next()) |entry| {
        if (entry.p_type != elf.PT_LOAD or entry.p_memsz == 0) continue;

        // 1. Allocate physical pages to back this segment. The segment's virtual
        //    address may not be page-aligned, so align it down and carry the
        //    offset into the first page (vaddr_shift) when sizing the mapping.
        const real_vaddr = std.mem.alignBackward(usize, entry.p_vaddr, std.heap.pageSize());
        const vaddr_shift = entry.p_vaddr - real_vaddr;
        const memsz_pages = std.mem.alignForward(usize, vaddr_shift + entry.p_memsz, std.heap.pageSize());
        const page_backing = try common.init_data.bootmem.allocPhys(memsz_pages);
        try paging.mapPhys(.{
            .vaddr = real_vaddr,
            .paddr = page_backing,
            .size = memsz_pages,
            .memory_type = .MemoryWriteBack,
            .perms = .{
                .x = (entry.p_flags & elf.PF_X) != 0,
                .w = (entry.p_flags & elf.PF_W) != 0,
                .u = true,
            },
            .context = context,
        });

        // 2. Copy p_filesz bytes from the segment's file offset into the new pages.
        const dst = common.mm.physToHHDM([*]u8, page_backing + vaddr_shift);
        const dst_slice = dst[0..entry.p_filesz];
        const src_slice = task_slice[entry.p_offset..][0..entry.p_filesz];
        @memcpy(dst_slice, src_slice);

        // 3. Zero the remaining p_memsz - p_filesz bytes (e.g. .bss).
        const zero_slice = dst[entry.p_filesz..entry.p_memsz];
        @memset(zero_slice, 0);
    }

    // Return the entry point
    return hdr.entry;
}