Microkernel thing OS experiment (Zig ⚡)

feat: multitasking works!

Basic round-robin scheduling is used.
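In round-robin scheduling, runnable tasks sit in a FIFO queue: on each timer tick the scheduler dequeues the next task, swaps its saved state with the interrupted one, and pushes the previous task back onto the tail. A minimal, self-contained Zig sketch of that rotation follows (the Task and RunQueue types are simplified stand-ins for illustration, not the kernel's actual arch.structures.Task or its Queue dependency):

const std = @import("std");

// Simplified stand-in for an intrusive run queue of tasks.
const Task = struct {
    name: []const u8,
    next: ?*Task = null,
};

const RunQueue = struct {
    head: ?*Task = null,
    tail: ?*Task = null,

    fn push(q: *RunQueue, t: *Task) void {
        t.next = null;
        if (q.tail) |tail| tail.next = t else q.head = t;
        q.tail = t;
    }

    fn pop(q: *RunQueue) ?*Task {
        const t = q.head orelse return null;
        q.head = t.next;
        if (q.head == null) q.tail = null;
        return t;
    }
};

pub fn main() void {
    var a = Task{ .name = "root-69" };
    var b = Task{ .name = "root-420" };
    var rq = RunQueue{};
    rq.push(&a);
    rq.push(&b);

    // Each "timer tick": run the head task, then rotate it to the back.
    var tick: usize = 0;
    while (tick < 4) : (tick += 1) {
        const current = rq.pop().?;
        std.debug.print("tick {}: running {s}\n", .{ tick, current.name });
        rq.push(current);
    }
}

The kernel version below differs in that the rotation happens inside the APIC timer interrupt and the "run" step is a register/CR3 swap, but the queue discipline is the same.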

pci.express 62ca8c30 f49cd591

verified
Changed files
+198 -47
+2 -1
assets/limine.conf
···
//AMD64 Kernel
protocol: limine
path: boot():/kernel-amd64.elf
- module_path: boot():/init-amd64.elf
+ module_path: boot():/root-69.elf
+ module_path: boot():/root-420.elf
//aarch64 Kernel
protocol: limine
path: boot():/kernel-aarch64.elf
+12 -5
build.zig
···
const ukernel_inst = b.addInstallFile(ukernel_artifact.getEmittedBin(), arch.kernelExeName());
b.getInstallStep().dependOn(&ukernel_inst.step);
- const root_dep = b.dependency("root_server", .{
+ const root_69 = b.dependency("root_server", .{
+ .arch = arch,
+ .number = 0x69,
+ }).artifact("root_server");
+ const root_69_inst = b.addInstallFile(root_69.getEmittedBin(), "root-69.elf");
+ b.getInstallStep().dependOn(&root_69_inst.step);
+
+ const root_420 = b.dependency("root_server", .{
.arch = arch,
- });
- const root_artifact = root_dep.artifact("root_server");
- const root_inst = b.addInstallFile(root_artifact.getEmittedBin(), arch.rootTaskName());
- b.getInstallStep().dependOn(&root_inst.step);
+ .number = 0x420,
+ }).artifact("root_server");
+ const root_420_inst = b.addInstallFile(root_420.getEmittedBin(), "root-420.elf");
+ b.getInstallStep().dependOn(&root_420_inst.step);
// Run in QEMU
run_blk: {
+2
components/root_server/build.zig
···
pub fn build(b: *std.Build) void {
const arch = b.option(build_helpers.Architecture, "arch", "The target root_server architecture") orelse .amd64;
+ const number = b.option(usize, "number", "The syscall number to use") orelse 0x69;
// set CPU features based on the architecture
const target = b.resolveTargetQuery(.{
···
const config = b.addOptions();
config.addOption(build_helpers.Architecture, "arch", arch);
+ config.addOption(usize, "number", number);
const build_helpers_dep = b.dependency("build_helpers", .{});
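For reference, this uses Zig's standard build-options mechanism: b.addOptions() produces a generated module, and the executable's root module imports it under a chosen name (here "config"). A minimal sketch of that wiring, assuming a plain executable (names like "demo" and src/main.zig are hypothetical):

const std = @import("std");

pub fn build(b: *std.Build) void {
    // Build-time knob, settable as `zig build -Dnumber=0x420`.
    const number = b.option(usize, "number", "The syscall number to use") orelse 0x69;

    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = b.path("src/main.zig"),
        .target = b.standardTargetOptions(.{}),
        .optimize = b.standardOptimizeOption(.{}),
    });

    // Generated module; source code reads it via @import("config").number.
    const config = b.addOptions();
    config.addOption(usize, "number", number);
    exe.root_module.addOptions("config", config);

    b.installArtifact(exe);
}

In root_server this wiring already exists for the arch option; the diff just adds number alongside it so each instance can pass a distinct value into the syscall loop in main.zig.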
+6 -2
components/root_server/src/main.zig
···
const std = @import("std");
const os = @import("os.zig");
+ const config = @import("config");
export fn _start() callconv(.c) noreturn {
_ = os.syscall1(SYS_poke, 0xB16B00B5BADBABE);
_ = os.syscall1(SYS_exit, 0x69696969);
asm volatile ("int3");
asm volatile (
- \\ mov $0x69696969, %%rdi
+ \\ mov %[number], %%rdi
\\ xor %%rsi, %%rsi
\\ xor %%rbx, %%rbx
\\ mainloop:
\\ xor %%rax, %%rax
\\ delayloop:
\\ inc %%rax
- \\ cmp $0x4000000, %%rax
+ \\ cmp $0x1000000, %%rax
\\ jnz delayloop
\\ inc %%rbx
+ \\ mov %%rsp, %%rsi
\\ syscall
\\ jmp mainloop
+ :
+ : [number] "r" (config.number),
);
die();
+1 -1
components/ukernel/arch/aarch64/boot.zig
···
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
const mod_size = mod.size;
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
- common.init_data.root_task = mod_addr[0..mod_size];
+ common.init_data.root_task_elf = mod_addr[0..mod_size];
}
} else {
@branchHint(.unlikely);
+21 -5
components/ukernel/arch/amd64/boot.zig
···
const gdt = &arch.per_cpu_init_data.gdt_buf[0];
gdt.* = .{};
const tss = &arch.per_cpu_init_data.tss_buf[0];
- // TSS rsp 0x3800
+ // TODO: create a fixed mapping for the pages maybe?
tss.* = .{
- .rsp0 = 0x7ffe_0000_8000,
- .rsp1 = 0x7ffe_0000_8000,
- .rsp2 = 0x7ffe_0000_8000,
+ .rsp0 = common.init_data.hhdm_slide + arch.per_cpu_init_data.getStackPhys(0),
};
gdt.tss_desc.set_tss_addr(tss);
···
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
const mod_size = mod.size;
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
- common.init_data.root_task = mod_addr[0..mod_size];
+ common.init_data.root_task_elf = mod_addr[0..mod_size];
}
} else {
@branchHint(.unlikely);
···
}
bootstrapAPs();
+ }
+
+ pub fn loadTasks() void {
+ const tasks_buf: [*]arch.structures.Task = @ptrFromInt(common.init_data.bootmem.allocMem(std.heap.pageSize()) catch {
+ std.log.err("Couldn't allocate tasks!", .{});
+ @panic("allocPhys");
+ });
+ const tasks_scratch: []arch.structures.Task = tasks_buf[0 .. std.heap.pageSize() / @sizeOf(arch.structures.Task)];
+
+ if (limine_requests.modules.response) |module_response| {
+ if (module_response.module_count > 0) {
+ for (module_response.modules.?[0..module_response.module_count], 0..) |mod, i| {
+ const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
+ const mod_size = mod.size;
+ common.loadTask(&tasks_scratch[i], mod_addr[0..mod_size]);
+ }
+ }
+ }
}
fn initHwDesc() void {
+17 -4
components/ukernel/arch/amd64/interrupts/apic.zig
···
}
pub fn timer_handler(stack_trace: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
- log.warn("Got an APIC timer interrupt, incrementing user's rsi...", .{});
- stack_trace.regs.rsi += 1;
- singleton.setRegister(.eoi, 0);
- armTimer(1000);
+ defer {
+ singleton.setRegister(.eoi, 0);
+ armTimer(20);
+ }
+ // 1. Get the next task. If there is no next task, just keep scheduling.
+ const task = common.scheduler.getNextTask() orelse return;
+ // 2. Swap the next task state with the current interrupt trace
+ std.mem.swap(arch.interrupts.idt.SavedRegisters, &task.regs, &stack_trace.regs);
+ std.mem.swap(u64, &task.rip, &stack_trace.rip);
+ std.mem.swap(u64, &task.rsp, &stack_trace.rsp);
+ // If task has a new cr3, swap current CR3 and task cr3 too
+ if (task.cr3_val != stack_trace.cr3) {
+ arch.registers.ControlRegisters.Cr3.write(task.cr3_val);
+ task.cr3_val = stack_trace.cr3;
+ }
+ // 3. Now, `task` has our current state, so enqueue it.
+ common.scheduler.pushTask(task);
}
+2
components/ukernel/arch/amd64/interrupts/idt.zig
···
r13: u64,
r14: u64,
r15: u64,
+
+ pub const default = std.mem.zeroes(SavedRegisters);
};
/// The Interrupt frame which we help generate
+15
components/ukernel/arch/amd64/interrupts/root.zig
···
const std = @import("std");
const log = std.log.scoped(.interrupts);
const arch = @import("../root.zig");
+ const common = @import("common");
pub inline fn enable() void {
asm volatile ("sti");
···
}
print_regs(stack_frame.normalize());
arch.instructions.die();
+ }
+
+ // Start scheduling
+ pub fn startScheduling() noreturn {
+ // 1. Pop off the task to run
+ const task = common.scheduler.getNextTask() orelse {
+ std.log.scoped(.startScheduling).err("No root task!", .{});
+ @panic("startScheduling");
+ };
+ // 2. Apply the paging context
+ task.getPagingContext().apply();
+ // 3. Arm the timer slice and fire away
+ apic.armTimer(20);
+ enter_userspace(task.rip, 0x69, task.rsp);
}
// Set up the IDT, PIC, TSC, and APIC
+21 -7
components/ukernel/arch/amd64/mm/paging.zig
···
level5: bool,
const Self = @This();
- pub fn apply(self: *Self) void {
+ pub fn apply(self: *const Self) void {
// NX Enable
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
const efer_val = IA32_EFER.read() | (0b1 << 11);
···
};
}
+ pub fn make_user() !Context {
+ // Make a new root page table
+ const user_root_paddr = try make_page_table();
+ const user_root = common.mm.physToHHDM(*PageTable, user_root_paddr);
+ // Copy the entire higher half entries
+ const higher_half = common.init_data.kernel_paging_ctx.root_table(0).get_children();
+ @memcpy(user_root.entries[256..], higher_half[256..]);
+ return .{
+ .cr3_val = user_root_paddr,
+ .level5 = common.init_data.kernel_paging_ctx.level5,
+ };
+ }
+
pub fn can_map_at(_: *const Self, level: u3) bool {
return level < 2;
}
···
// We need the parameter because aarch64 has 2 root page tables
pub fn root_table(self: *Self, _: u64) TableHandle {
return .{
- .paddr = self.cr3_val,
+ // Mask out the cr3 value
+ .paddr = self.cr3_val & 0xFFFFFFFF_FFFFF000,
.level = if (self.level5) 5 else 4,
.context = self,
.perms = .{
···
}
pub const page_sizes = [_]usize{
- 0x1000, // 4K
- 0x200000, // 2M
- 0x40000000, // 1G
- 0x8000000000, // 512G
- 0x1000000000000, // 256T
+ 0x1000,
+ 0x200000,
+ 0x40000000,
+ 0x8000000000,
+ 0x1000000000000,
};
const MappingHandle = struct {
+18 -3
components/ukernel/arch/amd64/root.zig
···
gdt_buf: []StandardGdt = undefined,
tss_buf: []Tss = undefined,
+ // Physical ptr
+ stack_buf: usize = undefined,
+
+ const stack_size = std.heap.page_size_max;
const Self = @This();
pub fn init(self: *Self, cpu_count: u64) void {
- // 1. Allocate space for GDT and TSS data
+ // 1. Allocate stack space for every core
+ self.stack_buf = common.init_data.bootmem.allocPhys(stack_size * cpu_count) catch |err| {
+ std.log.err("init PerCpuInitData: failed to allocate stack! {}", .{err});
+ @panic("stack_buf");
+ };
+
+ // 2. Allocate space for GDT and TSS data
const gdt_size = @sizeOf(StandardGdt);
const tss_size = @sizeOf(Tss);
const total_required_size = gdt_size * cpu_count + tss_size * cpu_count;
const buf: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(total_required_size) catch |err| {
std.log.err("init PerCpuInitData: GDT/TSS alloc failed: {}", .{err});
- @panic("rip bozo");
+ @panic("gdt_tss_buf");
});
- // 2. Transmute and fill out the structure
+ // 3. Transmute and fill out the structure
const gdt_buf: [*]StandardGdt = @ptrCast(@alignCast(buf[0 .. gdt_size * cpu_count]));
const tss_buf: [*]Tss = @ptrCast(@alignCast(buf[gdt_size * cpu_count ..][0 .. tss_size * cpu_count]));
self.gdt_buf = gdt_buf[0..cpu_count];
self.tss_buf = tss_buf[0..cpu_count];
+ }
+
+ // returns a pointer to the TOP of the stack!
+ pub fn getStackPhys(self: *Self, core_num: usize) usize {
+ return self.stack_buf + (core_num + 1) * stack_size;
}
};
+24
components/ukernel/arch/amd64/structures/root.zig
···
pub const gdt = @import("gdt.zig");
pub const tss = @import("tss.zig");
+ const arch = @import("../root.zig");
+ const common = @import("common");
+ const Queue = @import("Queue");
+
+ // Uses an intrusive queue
+ pub const Task = struct {
+ // Saved Registers
+ regs: arch.interrupts.idt.SavedRegisters align(8),
+ // Address Space context
+ cr3_val: u64,
+ // Instruction Pointer
+ rip: u64,
+ // Stack Pointer
+ rsp: u64,
+ // Next task basically
+ node: Queue.Node = .{},
+
+ pub fn getPagingContext(self: Task) arch.mm.paging.Context {
+ return .{
+ .cr3_val = self.cr3_val,
+ .level5 = common.init_data.kernel_paging_ctx.level5,
+ };
+ }
+ };
+3
components/ukernel/build.zig
···
const spinlock_mod = b.dependency("spinlock", .{}).module("spinlock");
const limine_mod = b.dependency("limine", .{ .api_revision = 3 }).module("limine");
const console_mod = b.dependency("console", .{}).module("console");
+ const queue_mod = b.dependency("Queue", .{}).module("Queue");
arch_module.addImport("limine", limine_mod);
arch_module.addImport("console", console_mod);
arch_module.addImport("common", common_mod);
+ arch_module.addImport("Queue", queue_mod);
common_mod.addImport("arch", arch_module);
common_mod.addImport("spinlock", spinlock_mod);
common_mod.addImport("console", console_mod);
+ common_mod.addImport("Queue", queue_mod);
const kernel = b.addExecutable(.{
.name = "ukernel",
+4
components/ukernel/build.zig.zon
···
.spinlock = .{ .path = "deps/spinlock" },
.build_helpers = .{ .path = "../build_helpers" },
.console = .{ .path = "deps/console" },
+ .Queue = .{
+ .url = "git+https://tangled.sh/@sydney.blue/Queue.zig?ref=dev#6c0760e8a233c1d59554a40a87f0ef293a9697f3",
+ .hash = "Queue-0.0.0-upnEfhEPAADNV4Dvs3DVCRSnOh-BrhgsRR6scaE2qTIa",
+ },
},
.paths = .{
"build.zig",
+1 -1
components/ukernel/common/aux.zig
···
console: ?console.Console = null,
framebuffer: ?console.Framebuffer = null,
hardware_description: HardwareDescription = .none,
- root_task: []align(4096) u8 = undefined,
+ root_task_elf: []align(4096) u8 = undefined,
hhdm_slide: usize = 0,
kernel_paging_ctx: arch.mm.paging.Context = undefined,
};
+4 -5
components/ukernel/common/loader.zig
···
const log = std.log.scoped(.elf_loader);
// Load root task, return the entry point
- pub fn loadRootTask(context: *arch.mm.paging.Context) !usize {
- const root_task = common.init_data.root_task;
+ pub fn loadElf(context: *arch.mm.paging.Context, task_slice: []align(4096) u8) !usize {
const hdr = blk: {
- const hdr: *elf.Elf64_Ehdr = @ptrCast(root_task);
+ const hdr: *elf.Elf64_Ehdr = @ptrCast(task_slice);
break :blk elf.Header.init(hdr.*, .little);
};
- var iter = hdr.iterateProgramHeadersBuffer(root_task);
+ var iter = hdr.iterateProgramHeadersBuffer(task_slice);
while (try iter.next()) |entry| {
if ((entry.p_type != elf.PT_LOAD) or (entry.p_memsz == 0)) continue;
···
const dst = common.mm.physToHHDM([*]u8, page_backing + vaddr_shift);
const dst_slice = dst[0..entry.p_filesz];
- const src_slice = root_task[entry.p_offset..][0..entry.p_filesz];
+ const src_slice = task_slice[entry.p_offset..][0..entry.p_filesz];
@memcpy(dst_slice, src_slice);
// 3. Add memsz - filesz zeroes
+29 -13
components/ukernel/common/root.zig
···
pub const aux = @import("aux.zig");
pub const mm = @import("mm/root.zig");
- pub const loadRootTask = loader.loadRootTask;
+ pub const scheduler = @import("scheduler.zig");
+ pub const loadElf = loader.loadElf;
const arch = @import("arch");
const std = @import("std");
const loader = @import("loader.zig");
···
// Now, set up interrupts
arch.interrupts.init();
+ arch.interrupts.init_syscalls();
- log.info("Loading root task...", .{});
+ log.info("Loading attached tasks...", .{});
+ arch.boot.loadTasks();
- // The following needs to be genericized and unshittified
+ log.info("Dropping to userspace!", .{});
- // Allocate a stack
+ arch.interrupts.startScheduling();
+ }
+
+ pub fn loadTask(scratch: *arch.structures.Task, task_slice: []align(4096) u8) void {
+ // 1. Create a user address space
+ var user_ctx = arch.mm.paging.Context.make_user() catch |err| {
+ std.log.err("Failed to make user context! {}", .{err});
+ @panic("make_user_ctx");
+ };
+
+ // 2. Allocate a user stack
mm.paging.map(.{
.vaddr = 0x7ffe_0000_0000,
.size = 65536,
···
.u = true,
.w = true,
},
+ .context = &user_ctx,
}) catch @panic("couldn't map user stack");
- // TODO: make user page tables!
- const entry = loadRootTask(&init_data.kernel_paging_ctx) catch |err| {
- log.err("Couldn't load the root task! {}", .{err});
+ // 3. Map ELF into address space
+ const entry = loadElf(&user_ctx, task_slice) catch |err| {
+ std.log.err("Couldn't load the root task! {}", .{err});
@panic("ggz");
};
- log.info("Dropping to userspace entry 0x{x:0>16}", .{entry});
-
- arch.interrupts.init_syscalls();
-
- arch.interrupts.apic.armTimer(1000);
- arch.interrupts.enter_userspace(entry, 0x69, 0x7ffe_0001_0000);
+ // 4. Add task to scheduler
+ scratch.* = .{
+ .cr3_val = user_ctx.cr3_val,
+ .regs = .default,
+ .rip = entry,
+ .rsp = 0x7ffe_0001_0000,
+ };
+ scheduler.pushTask(scratch);
}
// std options etc.
+16
components/ukernel/common/scheduler.zig
···
+ const std = @import("std");
+ const arch = @import("arch");
+ const Queue = @import("Queue");
+ const Task = arch.structures.Task;
+
+ var task_queue: Queue = .{};
+
+ pub fn pushTask(task: *Task) void {
+ task_queue.enqueue(&task.node);
+ }
+
+ pub fn getNextTask() ?*Task {
+ const node = task_queue.dequeue() orelse return null;
+ const task: *Task = @fieldParentPtr("node", node);
+ return task;
+ }
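A note on the intrusive-queue pattern used above: the queue links the Queue.Node fields embedded in each Task, and @fieldParentPtr recovers the owning Task from a dequeued node pointer. A tiny self-contained illustration of that recovery (the Node and Task types below are simplified stand-ins, not the kernel's):

const std = @import("std");

const Node = struct { next: ?*Node = null };

const Task = struct {
    id: u32,
    node: Node = .{},
};

pub fn main() void {
    var t = Task{ .id = 7 };
    const n: *Node = &t.node;
    // Recover the containing Task from a pointer to its embedded node field.
    const owner: *Task = @fieldParentPtr("node", n);
    std.debug.print("owner id = {}\n", .{owner.id});
}

Embedding the node inside the task means the scheduler never allocates per enqueue; pushTask and getNextTask only relink pointers that already live inside each Task.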