Microkernel thing OS experiment (Zig ⚡)

Compare changes

Choose any two refs to compare.

+2 -1
assets/limine.conf
···
//AMD64 Kernel
protocol: limine
path: boot():/kernel-amd64.elf
-
module_path: boot():/init-amd64.elf
+
module_path: boot():/root-69.elf
+
module_path: boot():/root-420.elf
//aarch64 Kernel
protocol: limine
path: boot():/kernel-aarch64.elf
+12 -5
build.zig
···
const ukernel_inst = b.addInstallFile(ukernel_artifact.getEmittedBin(), arch.kernelExeName());
b.getInstallStep().dependOn(&ukernel_inst.step);
-
const root_dep = b.dependency("root_server", .{
+
const root_69 = b.dependency("root_server", .{
+
.arch = arch,
+
.number = 0x69,
+
}).artifact("root_server");
+
const root_69_inst = b.addInstallFile(root_69.getEmittedBin(), "root-69.elf");
+
b.getInstallStep().dependOn(&root_69_inst.step);
+
+
const root_420 = b.dependency("root_server", .{
.arch = arch,
-
});
-
const root_artifact = root_dep.artifact("root_server");
-
const root_inst = b.addInstallFile(root_artifact.getEmittedBin(), arch.rootTaskName());
-
b.getInstallStep().dependOn(&root_inst.step);
+
.number = 0x420,
+
}).artifact("root_server");
+
const root_420_inst = b.addInstallFile(root_420.getEmittedBin(), "root-420.elf");
+
b.getInstallStep().dependOn(&root_420_inst.step);
// Run in QEMU
run_blk: {
+3 -4
build.zig.zon
···
.path = "components/build_helpers",
},
.limine_binary = .{
-
.url = "git+https://codeberg.org/Limine/Limine?ref=v9.x-binary#acf1e35c4685dba7ef271013db375a727c340ff7",
-
.hash = "N-V-__8AAOkzSACT_9p6kmSSly1l008erzXuG39Z6r54B_y0",
-
// Codeberg is always down so better to leave it not lazy
-
// .lazy = true,
+
.url = "git+https://codeberg.org/Limine/Limine?ref=v10.x-binary#648e33afd153bdbf780ba123e45997428796395d",
+
.hash = "N-V-__8AAJ8bSADxAGaebgaAbkAR2kqOBy52rXAL0oCumn0t",
+
.lazy = true,
},
.edk2_binary = .{
.url = "git+https://github.com/retrage/edk2-nightly#23068f498687bf64f2b8f80fbcf11e82d987fd9b",
+2
components/root_server/build.zig
···
pub fn build(b: *std.Build) void {
const arch = b.option(build_helpers.Architecture, "arch", "The target root_server architecture") orelse .amd64;
+
const number = b.option(usize, "number", "The syscall number to use") orelse 0x69;
// set CPU features based on the architecture
const target = b.resolveTargetQuery(.{
···
const config = b.addOptions();
config.addOption(build_helpers.Architecture, "arch", arch);
+
config.addOption(usize, "number", number);
const build_helpers_dep = b.dependency("build_helpers", .{});
+19
components/root_server/src/main.zig
···
const std = @import("std");
const os = @import("os.zig");
+
const config = @import("config");
export fn _start() callconv(.c) noreturn {
_ = os.syscall1(SYS_poke, 0xB16B00B5BADBABE);
_ = os.syscall1(SYS_exit, 0x69696969);
+
asm volatile ("int3");
+
asm volatile (
+
\\ mov %[number], %%rdi
+
\\ xor %%rsi, %%rsi
+
\\ xor %%rbx, %%rbx
+
\\ mainloop:
+
\\ xor %%rax, %%rax
+
\\ delayloop:
+
\\ inc %%rax
+
\\ cmp $0x1000000, %%rax
+
\\ jnz delayloop
+
\\ inc %%rbx
+
\\ mov %%rsp, %%rsi
+
\\ syscall
+
\\ jmp mainloop
+
:
+
: [number] "r" (config.number),
+
);
die();
}
+3 -4
components/ukernel/arch/aarch64/boot.zig
···
const std = @import("std");
const arch = @import("root.zig");
const common = @import("common");
-
const console = @import("console");
const log = std.log.scoped(.aarch64_init);
pub const limine_requests = struct {
···
pub fn bsp_init() callconv(.c) noreturn {
if (limine_requests.framebuffer.response) |fb_response| {
if (fb_response.framebuffer_count > 0) {
-
const fb = console.Framebuffer.from_limine(fb_response.getFramebuffers()[0]);
+
const fb = common.aux.Framebuffer.from_limine(fb_response.getFramebuffers()[0]);
common.init_data.framebuffer = fb;
@memset(fb.address[0..64], 0xFF);
}
···
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
const mod_size = mod.size;
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
-
common.init_data.root_task = mod_addr[0..mod_size];
+
common.init_data.root_task_elf = mod_addr[0..mod_size];
}
} else {
@branchHint(.unlikely);
···
fn initConsole() void {
if (limine_requests.framebuffer.response) |fb_response| {
if (fb_response.framebuffer_count > 0) {
-
const fb = console.Framebuffer.from_limine(fb_response.getFramebuffers()[0]);
+
const fb = common.aux.Framebuffer.from_limine(fb_response.getFramebuffers()[0]);
common.init_data.framebuffer = fb;
// At this point, log becomes usable
common.init_data.console = console.Console.from_font(fb, console.DefaultFont);
+64 -241
components/ukernel/arch/amd64/boot.zig
···
const common = @import("common");
const console = @import("console");
const log = std.log.scoped(.amd64_init);
-
const Idt = arch.structures.Idt;
const StandardGdt = arch.structures.gdt.StandardGdt;
const Tss = arch.structures.tss.Tss;
···
pub export var mp: limine.SmpMpFeature.MpRequest linksection(".limine_reqs") = .{ .flags = .{ .x2apic = true } };
};
-
pub fn bsp_init() callconv(.c) noreturn {
+
pub fn early_init() void {
// Don't optimize away the limine requests
inline for (@typeInfo(limine_requests).@"struct".decls) |decl| {
std.mem.doNotOptimizeAway(&@field(limine_requests, decl.name));
···
arch.instructions.die();
}
+
// If the base revision isn't supported, we can't boot
+
if (!limine_requests.base_revision.isSupported()) {
+
@branchHint(.cold);
+
arch.instructions.die();
+
}
+
// Die if we don't have a memory map or Higher Half Direct Mapping
if (limine_requests.memmap.response == null) {
@branchHint(.cold);
···
const hhdm_offset = limine_requests.hhdm.response.?.offset;
common.init_data.hhdm_slide = hhdm_offset;
-
// Add in a framebuffer if found
-
initConsole();
+
// Get CPUID info
+
arch.instructions.cpuid.init();
+
+
// Set up the kernel paging context
+
common.init_data.kernel_paging_ctx = arch.mm.paging.Context.get_current();
+
}
+
+
pub fn bsp_init() void {
+
// Set up per-cpu data
+
arch.per_cpu_init_data.init(limine_requests.mp.response.?.cpu_count);
+
+
// Set up our own GDT and TSS
+
const gdt = &arch.per_cpu_init_data.gdt_buf[0];
+
gdt.* = .{};
+
const tss = &arch.per_cpu_init_data.tss_buf[0];
+
// TODO: create a fixed mapping for the pages maybe?
+
tss.* = .{
+
.rsp0 = common.init_data.hhdm_slide + arch.per_cpu_init_data.getStackPhys(0),
+
};
+
+
gdt.tss_desc.set_tss_addr(tss);
+
gdt.load();
+
+
// Add in the framebuffer
+
if (limine_requests.framebuffer.response) |fb_response| {
+
if (fb_response.framebuffer_count > 0) {
+
const fb = fb_response.getFramebuffers()[0];
+
common.init_data.framebuffer = .{
+
.address = @ptrCast(@alignCast(fb.address)),
+
.width = fb.width,
+
.height = fb.height,
+
.pitch = fb.pitch,
+
.red_mask_size = fb.red_mask_size,
+
.red_mask_shift = fb.red_mask_shift,
+
.green_mask_size = fb.green_mask_size,
+
.green_mask_shift = fb.green_mask_shift,
+
.blue_mask_size = fb.blue_mask_size,
+
.blue_mask_shift = fb.blue_mask_shift,
+
.bypp = fb.bpp / 8,
+
};
+
}
+
}
// Add in ACPI/dtb if found, prefer ACPI
initHwDesc();
-
// Set up the temporary Physical Memory Allocator
-
common.mm.bootmem.init();
-
// Attach the root task
if (limine_requests.modules.response) |module_response| {
if (module_response.module_count > 0) {
···
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
const mod_size = mod.size;
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
-
common.init_data.root_task = mod_addr[0..mod_size];
+
common.init_data.root_task_elf = mod_addr[0..mod_size];
}
} else {
@branchHint(.unlikely);
@panic("No root task found!");
}
-
// Initialize per-cpu data (GDT and TSS)
-
arch.per_cpu_init_data.init(limine_requests.mp.response.?.cpu_count);
-
-
// Install the IDT
-
initIdt();
-
-
// Set up our own GDT and TSS
-
const gdt = &arch.per_cpu_init_data.gdt_buf[0];
-
gdt.* = .{};
-
const tss = &arch.per_cpu_init_data.tss_buf[0];
-
// TSS rsp 0x3800
-
tss.* = .{
-
.rsp0 = 0x3800,
-
.rsp1 = 0x3800,
-
.rsp2 = 0x3800,
-
};
-
-
gdt.tss_desc.set_tss_addr(tss);
-
gdt.load();
-
log.info("BSP successfully setup GDT+TSS!", .{});
-
-
// AP bootstrap
bootstrapAPs();
-
-
// Calibrate our TSC
-
arch.tsc.calibrate_pit() catch {
-
log.info("Failed to calibrate with PIT!", .{});
-
arch.instructions.die();
-
};
-
log.info("TSC estimate: {} MHz", .{arch.tsc.tsc_khz / 1000});
-
-
log.info("Setting up scheduling...", .{});
-
-
initApic() catch |err| {
-
log.err("Failed to set up APIC! {}", .{err});
-
@panic("apic");
-
};
-
-
log.info("Allocating code for userspace...", .{});
-
-
// Allocate a stack (0x3000 - 0x4000)
-
common.mm.paging.map(.{
-
.vaddr = 0x3000,
-
.size = 0x1000,
-
.memory_type = .MemoryWriteBack,
-
.perms = .{
-
.executable = false,
-
.userspace_accessible = true,
-
.writable = true,
-
},
-
}) catch @panic("couldn't map user stack");
-
-
const entry = common.loadRootTask() catch |err| {
-
log.err("Couldn't load the root task! {}", .{err});
-
@panic("ggz");
-
};
-
log.info("Dropping to userspace entry 0x{x:0>16}", .{entry});
-
-
init_syscalls();
-
-
enter_userspace(entry, 0x69, 0x4000);
}
-
// Get ready for system calls (set MSRs)
-
fn init_syscalls() void {
-
// Set up the STAR MSR with the segment descriptors
-
const IA32_STAR = arch.registers.MSR(u64, 0xC0000081);
-
const star_value: u64 = 0 | @as(u64, arch.structures.gdt.StandardGdt.selectors.kernel_code) << 32 | (@as(u64, arch.structures.gdt.StandardGdt.selectors.tss_desc + 8) | 3) << 48;
-
IA32_STAR.write(star_value);
-
-
// Set up the EFER MSR with SCE (System Call Enable)
-
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
-
const efer_val = IA32_EFER.read() | 0b1;
-
IA32_EFER.write(efer_val);
-
-
// Set up LSTAR with the syscall handler and FMASK to clear interrupts
-
const IA32_LSTAR = arch.registers.MSR(u64, 0xC0000082);
-
IA32_LSTAR.write(@intFromPtr(syscall_entry));
-
-
const IA32_FMASK = arch.registers.MSR(u64, 0xC0000084);
-
IA32_FMASK.write(1 << 9);
-
}
+
pub fn loadTasks() void {
+
const tasks_buf: [*]arch.structures.Task = @ptrFromInt(common.init_data.bootmem.allocMem(std.heap.pageSize()) catch {
+
std.log.err("Couldn't allocate tasks!", .{});
+
@panic("allocPhys");
+
});
+
const tasks_scratch: []arch.structures.Task = tasks_buf[0 .. std.heap.pageSize() / @sizeOf(arch.structures.Task)];
-
const syscall_entry = @extern(*anyopaque, .{
-
.name = "syscall_entry",
-
});
-
export fn syscall_handler(rdi: usize, rsi: usize) callconv(.c) void {
-
std.log.info("Got a syscall! rdi=0x{x}, rsi=0x{x}", .{ rdi, rsi });
-
}
-
-
fn enter_userspace(entry: u64, arg: u64, stack: u64) noreturn {
-
log.info("usercode64 GDT 0x{x}, userdata64 GDT 0x{x}", .{ arch.structures.gdt.StandardGdt.selectors.user_code, arch.structures.gdt.StandardGdt.selectors.user_data });
-
const cr3 = arch.registers.ControlRegisters.Cr3.read();
-
arch.registers.ControlRegisters.Cr3.write(cr3);
-
asm volatile (
-
\\ push %[userdata64]
-
\\ push %[stack]
-
\\ push $0x202
-
\\ push %[usercode64]
-
\\ push %[entry]
-
\\
-
\\ mov %[userdata64], %%rax
-
\\ mov %%rax, %%es
-
\\ mov %%rax, %%ds
-
\\
-
\\ xor %%rsi, %%rsi
-
\\ xor %%rax, %%rax
-
\\ xor %%rdx, %%rdx
-
\\ xor %%rcx, %%rcx
-
\\ xor %%rbp, %%rbp
-
\\ xor %%rbx, %%rbx
-
\\
-
\\ xor %%r8, %%r8
-
\\ xor %%r9, %%r9
-
\\ xor %%r10, %%r10
-
\\ xor %%r11, %%r11
-
\\ xor %%r12, %%r12
-
\\ xor %%r13, %%r13
-
\\ xor %%r14, %%r14
-
\\ xor %%r15, %%r15
-
\\
-
\\ iretq
-
\\
-
:
-
: [arg] "{rdi}" (arg),
-
[stack] "r" (stack),
-
[entry] "r" (entry),
-
[userdata64] "i" (arch.structures.gdt.StandardGdt.selectors.user_data),
-
[usercode64] "i" (arch.structures.gdt.StandardGdt.selectors.user_code),
-
);
-
unreachable;
-
}
-
-
fn initApic() !void {
-
const has_x2apic = limine_requests.mp.response.?.flags.x2apic;
-
arch.interrupts.apic.singleton = switch (has_x2apic) {
-
true => .x2apic,
-
false => blk: {
-
// Map the APIC first!
-
const apic_base = common.mm.physToHHDM([*]volatile u8, 0xFEE0_0000);
-
try common.mm.paging.mapPhys(.{
-
.vaddr = @intFromPtr(apic_base),
-
.paddr = 0xFEE0_0000,
-
.size = 0x1000,
-
.memory_type = .DeviceUncacheable,
-
.perms = .{
-
.executable = false,
-
.userspace_accessible = false,
-
.writable = true,
-
},
-
});
-
break :blk .{ .xapic = apic_base };
-
},
-
};
-
// Set up the spurious vector and the TPR
-
arch.interrupts.apic.init.initialSetup();
-
-
// Calibrate the APIC timer
-
arch.interrupts.apic.init.calibrateTimer();
-
-
// Enable periodic interrupts
-
arch.interrupts.apic.init.enablePeriodicInterrupt(1000);
-
}
-
-
fn initConsole() void {
-
if (limine_requests.framebuffer.response) |fb_response| {
-
if (fb_response.framebuffer_count > 0) {
-
const fb = console.Framebuffer.from_limine(fb_response.getFramebuffers()[0]);
-
common.init_data.framebuffer = fb;
-
// At this point, log becomes usable
-
common.init_data.console = console.Console.from_font(fb, console.DefaultFont);
-
common.init_data.console.?.setColor(0x3bcf1d, 0);
+
if (limine_requests.modules.response) |module_response| {
+
if (module_response.module_count > 0) {
+
for (module_response.modules.?[0..module_response.module_count], 0..) |mod, i| {
+
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
+
const mod_size = mod.size;
+
common.loadTask(&tasks_scratch[i], mod_addr[0..mod_size]);
+
}
}
}
}
···
}
}
-
pub fn initIdt() void {
-
const idt_addr: usize = @intFromPtr(arch.per_cpu_init_data.idt);
-
-
// Install the known exception handlers
-
arch.per_cpu_init_data.idt.breakpoint.installHandler(breakpoint_handler);
-
arch.per_cpu_init_data.idt.double_fault.installHandler(double_fault);
-
arch.per_cpu_init_data.idt.general_protection_fault.installHandler(gpf);
-
arch.per_cpu_init_data.idt.page_fault.installHandler(page_fault);
-
arch.per_cpu_init_data.idt.interrupts[0xFF - 32].installHandler(arch.interrupts.apic.spurious_interrupt_handler);
-
arch.per_cpu_init_data.idt.interrupts[48 - 32].installHandler(arch.interrupts.apic.periodic_handler);
-
-
// Load the Idt Register
-
const reg: Idt.Idtr = .{ .addr = idt_addr, .limit = @sizeOf(Idt) - 1 };
-
reg.load();
-
}
-
-
// TODO: update the type reflection thing to make a custom
-
// function type for the ISR
-
pub const PageFaultErrorCode = packed struct(u64) {
-
present: bool,
-
write: bool,
-
user: bool,
-
reserved_write: bool,
-
instruction_fetch: bool,
-
protection_key: bool,
-
shadow_stack: bool,
-
_reserved: u8,
-
sgx: bool,
-
_reserved2: u48,
-
-
pub fn val(self: *const PageFaultErrorCode) u64 {
-
return @bitCast(self.*);
-
}
-
};
-
pub fn page_fault(stack_frame: *arch.structures.Idt.InterruptStackFrame, err_code_u64: u64) callconv(.{ .x86_64_interrupt = .{} }) void {
-
const err_code: PageFaultErrorCode = @bitCast(err_code_u64);
-
log.err("PAGE FAULT @ 0x{x:0>16}, code 0x{x}!!!!!!!!!!!", .{ stack_frame.instruction_pointer, err_code.val() });
-
const cr2 = arch.registers.ControlRegisters.Cr2.read();
-
switch (err_code.write) {
-
true => log.err("Tried to write to vaddr 0x{x:0>16}", .{cr2}),
-
false => log.err("Tried to read from vaddr 0x{x:0>16}", .{cr2}),
-
}
-
log.err("dying...", .{});
-
arch.instructions.die();
-
}
-
-
pub fn breakpoint_handler(stack_frame: *Idt.InterruptStackFrame) callconv(.{ .x86_64_interrupt = .{} }) void {
-
log.warn("Breakpoint @ 0x{x:0>16}, returning execution...", .{stack_frame.instruction_pointer});
-
}
-
-
pub fn gpf(stack_frame: *Idt.InterruptStackFrame, err_code: u64) callconv(.{ .x86_64_interrupt = .{} }) void {
-
log.warn("gpf @ 0x{x:0>16} ERR CODE {}, returning execution...", .{ stack_frame.instruction_pointer, err_code });
-
arch.instructions.die();
-
}
-
-
pub fn double_fault(stack_frame: *Idt.InterruptStackFrame, err_code: u64) callconv(.{ .x86_64_interrupt = .{} }) noreturn {
-
common.init_data.console.?.setColor(0xf40d17, 0);
-
log.err("FATAL DOUBLE FAULT @ 0x{x:0>16}, code 0x{x}!!!!!!!!!!!", .{ stack_frame.instruction_pointer, err_code });
-
log.err("dying...", .{});
-
arch.instructions.die();
-
}
-
fn bootstrapAPs() void {
log.info("Bootstrapping APs...", .{});
const cpus = limine_requests.mp.response.?.getCpus();
···
fn ap_init(mp_info: *limine.SmpMpFeature.MpInfo) callconv(.c) noreturn {
// Set up the IDT
-
const idt_addr: usize = @intFromPtr(arch.per_cpu_init_data.idt);
-
const reg: Idt.Idtr = .{ .addr = idt_addr, .limit = @sizeOf(Idt) - 1 };
-
reg.load();
+
arch.interrupts.idt.load();
// Set up our GDT and TSS
const gdt = &arch.per_cpu_init_data.gdt_buf[mp_info.processor_id];
+61
components/ukernel/arch/amd64/instructions/cpuid.zig
···
+
// Do all the needed CPUID calls here, and store the info for later use
+
const std = @import("std");
+
const arch = @import("../root.zig");
+
+
pub const captured = struct {
+
pub var vendor_str: [12]u8 = undefined;
+
};
+
pub fn init() void {
+
capture_vendor_str();
+
capture_cpu_features();
+
}
+
+
fn capture_vendor_str() void {
+
const res = cpuid(0, 0);
+
@memcpy(captured.vendor_str[0..4], std.mem.asBytes(&res.ebx));
+
@memcpy(captured.vendor_str[4..8], std.mem.asBytes(&res.edx));
+
@memcpy(captured.vendor_str[8..12], std.mem.asBytes(&res.ecx));
+
}
+
+
fn capture_cpu_features() void {
+
const res = cpuid(1, 0);
+
const feat_ecx: FeaturesEcx = @bitCast(res.ecx);
+
arch.interrupts.apic.tsc_deadline_available = feat_ecx.tsc_deadline;
+
arch.interrupts.apic.has_x2apic = feat_ecx.x2apic;
+
}
+
+
const FeaturesEcx = packed struct(u32) {
+
sse3: bool,
+
pclmulqdq: bool,
+
dtes64: bool,
+
monitor: bool,
+
ds_cpl: bool,
+
vmx: bool,
+
smx: bool,
+
est: bool,
+
tm2: bool,
+
ssse3: bool,
+
cnxt_id: bool,
+
sdbg: bool,
+
fma: bool,
+
cx16: bool,
+
xtpr: bool,
+
pdcm: bool,
+
_reserved0: bool,
+
pcid: bool,
+
dca: bool,
+
sse4_1: bool,
+
sse4_2: bool,
+
x2apic: bool,
+
movbe: bool,
+
popcnt: bool,
+
tsc_deadline: bool,
+
aesni: bool,
+
xsave: bool,
+
osxsave: bool,
+
avx: bool,
+
f16c: bool,
+
rdrand: bool,
+
hypervisor: bool,
+
};
+
pub inline fn cpuid(leaf: u32, sub: u32) DefaultResults {
var eax: u32 = undefined;
var ebx: u32 = undefined;
+6 -3
components/ukernel/arch/amd64/instructions/root.zig
···
pub const cpuid = @import("cpuid.zig");
pub inline fn die() noreturn {
-
while (true) {
-
asm volatile ("hlt");
-
}
+
asm volatile (
+
\\ mov $0xDEADDEADDEADDEAD, %%rax
+
\\ 1: hlt
+
\\ jmp 1b
+
);
+
unreachable;
}
+82 -13
components/ukernel/arch/amd64/interrupts/apic.zig
···
const std = @import("std");
const arch = @import("../root.zig");
+
const idt = arch.interrupts.idt;
const log = std.log.scoped(.apic);
+
const common = @import("common");
pub var lapic_timer_khz: usize = 0;
+
pub var tsc_deadline_available = false;
+
pub var has_x2apic: bool = false;
// tbh every cpu will be either x2apic or not, and if xapic it will
// have the exact same base address anyways so this is fine
···
};
pub const init = struct {
-
// Get the APIC ready (call first)
pub fn initialSetup() void {
+
// First, make the APIC accessible
+
initSingleton() catch |err| {
+
log.err("Failed to map APIC! {}", .{err});
+
@panic("initSingleton");
+
};
+
// Set up the interrupt handlers
singleton.setSpuriousInterruptRegister(.{
.apic_soft_enable = true,
.idt_entry = 0xFF,
···
// .priority_class = 0,
// .priority_sub_class = 0,
// });
+
arch.interrupts.idt.add_handler(.{ .interrupt = 0xFF }, u64, spurious_interrupt_handler, 0, 0);
+
arch.interrupts.idt.add_handler(.{ .interrupt = 48 }, u64, timer_handler, 0, 0);
+
+
// Calibrate against the TSC
+
calibrateTimer();
+
// Set up the LVT Timer Register
+
enableOneshotInterrupt();
}
-
pub fn calibrateTimer() void {
+
fn initSingleton() !void {
+
arch.interrupts.apic.singleton = switch (has_x2apic) {
+
true => .x2apic,
+
false => blk: {
+
// Map the APIC first!
+
const apic_base = common.mm.physToHHDM([*]volatile u8, 0xFEE0_0000);
+
try common.mm.paging.mapPhys(.{
+
.vaddr = @intFromPtr(apic_base),
+
.paddr = 0xFEE0_0000,
+
.size = 0x1000,
+
.memory_type = .DeviceUncacheable,
+
.perms = .{
+
.x = false,
+
.u = false,
+
.w = true,
+
},
+
});
+
break :blk .{ .xapic = apic_base };
+
},
+
};
+
}
+
+
fn calibrateTimer() void {
singleton.setDivideConfigurationRegister(.div2);
singleton.setLVTTimerRegister(.{
.idt_entry = 0x69,
···
lapic_timer_khz = norm / 5;
-
log.debug("APIC timer: {} kHz", .{lapic_timer_khz});
+
log.debug("timer: {} kHz", .{lapic_timer_khz});
}
-
pub fn enablePeriodicInterrupt(ms: usize) void {
-
singleton.setInitialCountRegister(0);
-
singleton.setDivideConfigurationRegister(.div2);
+
fn enableOneshotInterrupt() void {
+
const mode: LAPIC.LVTTimerRegister.Mode = switch (tsc_deadline_available) {
+
true => .tsc_deadline,
+
false => blk: {
+
singleton.setInitialCountRegister(0);
+
singleton.setDivideConfigurationRegister(.div2);
+
break :blk .oneshot;
+
},
+
};
+
// TODO: detect and support tsc_deadline, ditto @ armTimer
singleton.setLVTTimerRegister(.{
.idt_entry = 48,
-
.mode = .periodic,
+
.mode = mode,
.masked = false,
});
+
}
+
};
+
pub fn armTimer(ms: usize) void {
+
if (tsc_deadline_available) {
+
const IA32_TSC_DEADLINE = arch.registers.MSR(u64, 0x6E0);
+
const delta = arch.tsc.tsc_khz * ms;
+
const target = arch.tsc.rdtsc() + delta;
+
+
IA32_TSC_DEADLINE.write(target);
+
} else {
const ticks: u32 = @truncate(lapic_timer_khz * ms);
-
singleton.setInitialCountRegister(ticks);
}
-
};
+
}
-
pub fn spurious_interrupt_handler(_: *arch.structures.Idt.InterruptStackFrame) callconv(.{ .x86_64_interrupt = .{} }) void {
+
pub fn spurious_interrupt_handler(_: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
log.warn("Got a spurious interrupt!", .{});
}
-
pub fn periodic_handler(_: *arch.structures.Idt.InterruptStackFrame) callconv(.{ .x86_64_interrupt = .{} }) void {
-
log.warn("Got an ACPI timer interrupt!", .{});
-
singleton.setRegister(.eoi, 0);
+
pub fn timer_handler(stack_trace: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
defer {
+
singleton.setRegister(.eoi, 0);
+
armTimer(20);
+
}
+
// 1. Get the next task. If there is no next task, just keep scheduling.
+
const task = common.scheduler.getNextTask() orelse return;
+
// 2. Swap the next task state with the current interrupt trace
+
std.mem.swap(arch.interrupts.idt.SavedRegisters, &task.regs, &stack_trace.regs);
+
std.mem.swap(u64, &task.rip, &stack_trace.rip);
+
std.mem.swap(u64, &task.rsp, &stack_trace.rsp);
+
// If task has a new cr3, swap current CR3 and task cr3 too
+
if (task.cr3_val != stack_trace.cr3) {
+
arch.registers.ControlRegisters.Cr3.write(task.cr3_val);
+
task.cr3_val = stack_trace.cr3;
+
}
+
// 3. Now, `task` has our current state, so enqueue it.
+
common.scheduler.pushTask(task);
}
+314
components/ukernel/arch/amd64/interrupts/idt.zig
···
+
const arch = @import("../root.zig");
+
const std = @import("std");
+
const interrupts = arch.interrupts;
+
const StandardGdt = arch.structures.gdt.StandardGdt;
+
+
// The actual IDT memory
+
const entry_count = 256;
+
export var interrupt_descriptor_table: [entry_count]Entry = undefined;
+
+
// Pointers to the actual ISRs which the global interrupt handler call
+
// Each IDT entry pushes the interrupt number then calls the global
+
// handler, which pushes more information then calls the user
+
// defined handler. Use common sense and don't return from an exception
+
// which shouldn't be returned from.
+
pub const CallConv: std.builtin.CallingConvention = .{ .x86_64_sysv = .{} };
+
pub fn InterruptHandler(comptime E: type) type {
+
return *const fn (*InterruptFrame(E)) callconv(CallConv) void;
+
}
+
pub export var defined_handlers: [entry_count]InterruptHandler(u64) = undefined;
+
+
// The actual handlers with addresses in the IDT.
+
const ActualHandler = *const fn () callconv(.naked) void;
+
const actual_handlers: [entry_count]ActualHandler = blk: {
+
@setEvalBranchQuota(100000000);
+
var ret: [entry_count]ActualHandler = undefined;
+
for (0..256) |i| {
+
ret[i] = make_actual_handler(.{ .interrupt = i });
+
}
+
break :blk ret;
+
};
+
+
fn make_actual_handler(comptime interrupt: Interrupt) ActualHandler {
+
var asm_code: []const u8 = "";
+
// Make the stack consistent
+
if (!interrupt.has_error_code()) {
+
asm_code = asm_code ++ "pushq $0\n";
+
}
+
// Push the interrupt number
+
asm_code = asm_code ++ std.fmt.comptimePrint("pushq ${}\n", .{interrupt.interrupt});
+
// Jump to the common interrupt handler code
+
asm_code = asm_code ++ "jmp _int_handler_common";
+
const code = asm_code;
+
+
const tmp_struct = struct {
+
fn actual_handler() callconv(.naked) void {
+
asm volatile (code);
+
}
+
};
+
return tmp_struct.actual_handler;
+
}
+
+
// The global assembly for the common interrupt handler
+
comptime {
+
// Construct the push and pop instructions from SavedRegisters
+
var push_instrs: []const u8 = "\n";
+
var pop_instrs: []const u8 = "\n";
+
+
const saved_regs = @typeInfo(SavedRegisters).@"struct".fields;
+
for (saved_regs) |saved_reg| {
+
// We must prepend to push because pushes are basically
+
// building the struct in reverse order, so reverse the effects
+
push_instrs = "\n pushq %" ++ saved_reg.name ++ push_instrs;
+
// Of course, pop in the opposite order as push
+
pop_instrs = pop_instrs ++ "popq %" ++ saved_reg.name ++ "\n";
+
}
+
+
asm (
+
\\ .global _int_handler_common
+
\\ .type _int_handler_common, @function
+
\\ _int_handler_common:
+
+
// Push the general purpose registers and then CR3
+
++ push_instrs ++
+
\\ mov %cr3, %rax
+
\\ pushq %rax
+
+
// Now, rsp points to the start of InterruptFrame. Read int_num and call into an offset of
+
// the defined handlers list after setting the first arg to the created structure.
+
++ std.fmt.comptimePrint("\nmov {}(%rsp), %rcx\n", .{@offsetOf(InterruptFrame(u64), "int_num")}) ++
+
\\ mov %rsp, %rdi
+
\\ callq *defined_handlers(, %rcx, 8)
+
+
// Skip the stack all the way down to the general purpose
+
// registers, then pop all of them
+
// TODO: restore CR3??
+
++ std.fmt.comptimePrint("\nadd ${}, %rsp\n", .{@offsetOf(InterruptFrame(u64), "regs")}) ++ pop_instrs ++
+
// Skip the interrupt number and error code and iretq
+
\\ add $16, %rsp
+
\\ iretq
+
);
+
}
+
+
/// IDT Register
+
const Idtr = packed struct(u80) {
+
limit: u16,
+
addr: u64,
+
+
/// Load the IDT Register
+
pub fn load(self: *const Idtr) void {
+
asm volatile ("lidt (%[idtr_addr])"
+
:
+
: [idtr_addr] "r" (self),
+
);
+
}
+
};
+
+
// A raw IDT entry
+
const Entry = extern struct {
+
func_low: u16,
+
gdt_selector: u16,
+
options: Options,
+
func_mid: u16,
+
func_high: u32,
+
_reserved0: u32 = 0,
+
+
pub const Options = packed struct(u16) {
+
ist_index: u3,
+
_reserved0: u5 = 0,
+
disable_interrupts: bool = true,
+
// type: Type,
+
must_be_one: u3 = 0b111,
+
must_be_zero: u1 = 0,
+
dpl: u2,
+
present: bool,
+
};
+
+
pub const Owner = enum {
+
kernel,
+
user,
+
};
+
+
const Self = @This();
+
pub fn init(func_ptr: usize, dpl: u2, ist: u3) Entry {
+
// _ = typ;
+
return .{
+
.func_low = @truncate(func_ptr),
+
.gdt_selector = StandardGdt.selectors.kernel_code,
+
.options = .{
+
.ist_index = ist,
+
.dpl = dpl,
+
.present = true,
+
},
+
.func_mid = @truncate(func_ptr >> 16),
+
.func_high = @truncate(func_ptr >> 32),
+
};
+
}
+
+
// Changes the address without changing anything else
+
pub fn set_func(self: *Self, ptr: usize) void {
+
self.func_low = @truncate(ptr);
+
self.func_mid = @truncate(ptr >> 16);
+
self.func_high = @truncate(ptr >> 32);
+
}
+
};
+
+
/// A selector error code indexing into the GDT, IDT, or LDT.
+
/// Used in a general protection fault for example.
+
pub const SelectorErrorCode = packed struct(u64) {
+
external: bool,
+
interrupt: bool,
+
// Only valid if not interrupt
+
type: enum(u1) {
+
gdt = 0,
+
ldt = 1,
+
},
+
idx: u13,
+
_reserved0: u48 = 0,
+
+
pub const Target = union(enum) {
+
interrupt: Interrupt,
+
gdt_sel: u16,
+
ldt_sel: u13,
+
};
+
+
const Self = @This();
+
pub fn parse(self: Self) Target {
+
return switch (self.interrupt) {
+
true => .{ .interrupt = .{ .interrupt = @truncate(self.idx) } },
+
false => switch (self.type) {
+
.gdt => .{ .gdt_sel = self.idx },
+
.ldt => .{ .ldt_sel = self.idx },
+
},
+
};
+
}
+
};
+
+
/// List of the general built in exceptions
+
pub const Exception = enum(u8) {
+
divide_error = 0x00,
+
debug_exeption = 0x01,
+
non_maskable_interrupt = 0x02,
+
breakpoint = 0x03,
+
overflow = 0x04,
+
bound_range_exceeded = 0x05,
+
invalid_opcode = 0x06,
+
device_not_available = 0x07,
+
double_fault = 0x08,
+
// _coprocessor_segment_overrun = 0x09,
+
invalid_tss = 0x0a,
+
segment_not_present = 0x0b,
+
stack_segment_fault = 0x0c,
+
general_protection_fault = 0x0d,
+
page_fault = 0x0e,
+
// _reserved0 = 0x0f,
+
x87_floating_point = 0x10,
+
alignment_check = 0x11,
+
machine_check = 0x12,
+
simd_floating_point = 0x13,
+
virtualization = 0x14,
+
control_protection = 0x15,
+
hypervisor = 0x1c,
+
vmm = 0x1d,
+
security_fault = 0x1e,
+
_,
+
+
fn has_error_code(self: Exception) bool {
+
return switch (self) {
+
.double_fault, .invalid_tss, .segment_not_present, .general_protection_fault, .page_fault, .security_fault => true,
+
else => false,
+
};
+
}
+
};
+
+
pub const Interrupt = packed union {
+
exception: Exception,
+
interrupt: u8,
+
+
pub fn has_error_code(self: Interrupt) bool {
+
return self.exception.has_error_code();
+
}
+
};
+
+
/// Basically all the general purpose registers except rsp,
+
/// because RSP is already in the InterruptFrame. Ordered
+
/// in the order of the X.Reg from the instruction encoding lol
+
pub const SavedRegisters = extern struct {
+
rax: u64,
+
rcx: u64,
+
rdx: u64,
+
rbx: u64,
+
// rsp: u64,
+
rbp: u64,
+
rsi: u64,
+
rdi: u64,
+
r8: u64,
+
r9: u64,
+
r10: u64,
+
r11: u64,
+
r12: u64,
+
r13: u64,
+
r14: u64,
+
r15: u64,
+
+
pub const default = std.mem.zeroes(SavedRegisters);
+
};
+
+
/// The Interrupt frame which we help generate
+
pub fn InterruptFrame(comptime ErrorCode: type) type {
+
if (@bitSizeOf(ErrorCode) != 64) {
+
@compileError("ErrorCode for InterruptFrame must be exactly 64 bits!");
+
}
+
return extern struct {
+
// CR3
+
cr3: u64,
+
// All the general purpose registers
+
regs: SavedRegisters align(8),
+
// The interrupt number
+
int_num: Interrupt align(8),
+
// Pushed by the CPU (error_code may be pushed by us)
+
error_code: ErrorCode,
+
rip: u64,
+
cs: u16 align(8),
+
eflags: u64,
+
rsp: u64,
+
ss: u16 align(8),
+
+
pub fn normalize(self: *InterruptFrame(ErrorCode)) *InterruptFrame(u64) {
+
return @ptrCast(self);
+
}
+
};
+
}
+
+
// Initialize the IDT with the default unhandled exception
+
pub fn init() void {
+
// Set every IDT entry to the corresponding ActualHandler
+
for (0..entry_count) |i| {
+
const actual_handler = @intFromPtr(actual_handlers[i]);
+
interrupt_descriptor_table[i] = Entry.init(actual_handler, 0, 0);
+
}
+
// Now, set every defined handler to the default one
+
@memset(&defined_handlers, arch.interrupts.unhandled_interrupt);
+
+
// Finally, load the idt
+
load();
+
}
+
+
pub fn load() void {
+
const idtr: Idtr = .{
+
.addr = @intFromPtr(&interrupt_descriptor_table),
+
.limit = 0xFFF,
+
};
+
idtr.load();
+
}
+
+
pub fn add_handler(interrupt: Interrupt, comptime E: type, handler: InterruptHandler(E), dpl: u2, ist: u3) void {
+
// Modify the type, dpl, and ist in place
+
var tmp = interrupt_descriptor_table[interrupt.interrupt];
+
tmp.options.dpl = dpl;
+
tmp.options.ist_index = ist;
+
interrupt_descriptor_table[interrupt.interrupt] = tmp;
+
+
// Add the InterruptHandler
+
defined_handlers[interrupt.interrupt] = @ptrCast(handler);
+
}
+10
components/ukernel/arch/amd64/interrupts/pic.zig
···
/// Remap the 8259 PIC to an interrupt base of 0x32
const arch = @import("../root.zig");
+
const idt = arch.interrupts.idt;
+
const std = @import("std");
+
const log = std.log.scoped(.pic);
const out = arch.port.out;
const in = arch.port.in;
···
wait();
out(u8, PIC_TWO_DATA_PORT, 0b1111_1111);
wait();
+
+
// Set up a spurious IRQ7 handler
+
arch.interrupts.idt.add_handler(.{ .interrupt = 32 + 7 }, u64, spurious_handler, 0, 0);
}
inline fn wait() void {
···
pub fn end_of_timer_interrupt() void {
out(u8, PIC_ONE_CMD_PORT, CMD_EOI);
}
+
+
pub fn spurious_handler(_: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
std.log.warn("Got a spurious IRQ7 (8259)", .{});
+
}
+165
components/ukernel/arch/amd64/interrupts/root.zig
···
pub const apic = @import("apic.zig");
pub const pic = @import("pic.zig");
pub const pit = @import("pit.zig");
+
pub const idt = @import("idt.zig");
+
const std = @import("std");
+
const log = std.log.scoped(.interrupts);
+
const arch = @import("../root.zig");
+
const common = @import("common");
pub inline fn enable() void {
asm volatile ("sti");
···
pub inline fn disable() void {
asm volatile ("cli");
}
+
+
const syscall_entry = @extern(*anyopaque, .{
+
.name = "syscall_entry",
+
});
+
+
export fn syscall_handler(rdi: usize, rsi: usize) callconv(.c) void {
+
std.log.info("Got a syscall! rdi=0x{x}, rsi=0x{x}", .{ rdi, rsi });
+
}
+
+
pub fn init_syscalls() void {
+
// Set up the STAR MSR with the segment descriptors
+
const IA32_STAR = arch.registers.MSR(u64, 0xC0000081);
+
const star_value: u64 = 0 | @as(u64, arch.structures.gdt.StandardGdt.selectors.kernel_code) << 32 | (@as(u64, arch.structures.gdt.StandardGdt.selectors.tss_desc + 8) | 3) << 48;
+
IA32_STAR.write(star_value);
+
+
// Set up the EFER MSR with SCE (System Call Enable)
+
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
+
const efer_val = IA32_EFER.read() | 0b1;
+
IA32_EFER.write(efer_val);
+
+
// Set up LSTAR with the syscall handler and FMASK to clear interrupts
+
const IA32_LSTAR = arch.registers.MSR(u64, 0xC0000082);
+
IA32_LSTAR.write(@intFromPtr(syscall_entry));
+
+
const IA32_FMASK = arch.registers.MSR(u64, 0xC0000084);
+
IA32_FMASK.write(1 << 9);
+
}
+
+
pub fn print_regs(frame: *idt.InterruptFrame(u64)) void {
+
std.log.err("CR3: 0x{x:0>16}", .{frame.cr3});
+
std.log.err("RAX: 0x{x:0>16}, RBX: 0x{x:0>16}, RCX: 0x{x:0>16}, RDX: 0x{x:0>16}", .{ frame.regs.rax, frame.regs.rbx, frame.regs.rcx, frame.regs.rdx });
+
std.log.err("RSI: 0x{x:0>16}, RDI: 0x{x:0>16}, RBP: 0x{x:0>16}, RSP: 0x{x:0>16}", .{ frame.regs.rsi, frame.regs.rdi, frame.regs.rbp, frame.rsp });
+
std.log.err("R8: 0x{x:0>16}, R9: 0x{x:0>16}, R10: 0x{x:0>16}, R11: 0x{x:0>16}", .{ frame.regs.r8, frame.regs.r9, frame.regs.r10, frame.regs.r11 });
+
std.log.err("R12: 0x{x:0>16}, R13: 0x{x:0>16}, R14: 0x{x:0>16}, R15: 0x{x:0>16}", .{ frame.regs.r12, frame.regs.r13, frame.regs.r14, frame.regs.r15 });
+
std.log.err("RFL: 0x{x:0>16}, RIP: 0x{x:0>16}, CS: 0x{x:0>16}, SS: 0x{x:0>16}", .{ frame.eflags, frame.rip, frame.cs, frame.ss });
+
}
+
+
pub fn unhandled_interrupt(frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
if (std.enums.tagName(idt.Exception, frame.int_num.exception)) |exception_name| {
+
std.log.err("Unhandled interrupt (0x{x} : {s})!!!", .{ frame.int_num.interrupt, exception_name });
+
} else {
+
std.log.err("Unhandled interrupt (0x{x})!!!", .{frame.int_num.interrupt});
+
}
+
+
print_regs(frame);
+
+
arch.interrupts.disable();
+
arch.instructions.die();
+
}
+
+
pub fn breakpoint(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
std.log.warn("Breakpoint @ 0x{x}, returning execution...", .{stack_frame.rip});
+
}
+
+
pub fn double_fault(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
std.log.err("Double fault @ 0x{x}, dying!!!", .{stack_frame.rip});
+
print_regs(stack_frame);
+
arch.interrupts.disable();
+
arch.instructions.die();
+
}
+
+
pub fn general_protection_fault(stack_frame: *idt.InterruptFrame(idt.SelectorErrorCode)) callconv(idt.CallConv) void {
+
arch.interrupts.disable();
+
std.log.warn("General Protection Fault @ 0x{x}", .{stack_frame.rip});
+
+
const target = stack_frame.error_code.parse();
+
switch (target) {
+
.interrupt => |int| {
+
if (std.enums.tagName(idt.Exception, int.exception)) |exc_name| {
+
std.log.warn("Caused by interrupt 0x{x} ({s})", .{ int.interrupt, exc_name });
+
} else {
+
std.log.warn("Caused by interrupt 0x{x}", .{int.interrupt});
+
}
+
},
+
.gdt_sel => |gdt_sel| {
+
std.log.warn("GDT selector: 0x{x}", .{gdt_sel});
+
},
+
.ldt_sel => |ldt_sel| {
+
std.log.warn("LDT selector: 0x{x}", .{ldt_sel});
+
},
+
}
+
print_regs(stack_frame.normalize());
+
arch.instructions.die();
+
}
+
+
// Start scheduling
+
pub fn startScheduling() noreturn {
+
// 1. Pop off the task to run
+
const task = common.scheduler.getNextTask() orelse {
+
std.log.scoped(.startScheduling).err("No root task!", .{});
+
@panic("startScheduling");
+
};
+
// 2. Apply the paging context
+
task.getPagingContext().apply();
+
// 3. Give a slice of 1000ms and fire away
+
apic.armTimer(20);
+
enter_userspace(task.rip, 0x69, task.rsp);
+
}
+
+
// Set up the IDT, PIC, TSC, and APIC
+
pub fn init() void {
+
// Set up the IDT and associated vectors
+
idt.init();
+
idt.add_handler(.{ .exception = .breakpoint }, u64, arch.interrupts.breakpoint, 3, 0);
+
idt.add_handler(.{ .exception = .double_fault }, u64, arch.interrupts.double_fault, 0, 0);
+
idt.add_handler(.{ .exception = .general_protection_fault }, idt.SelectorErrorCode, arch.interrupts.general_protection_fault, 0, 0);
+
idt.add_handler(.{ .exception = .page_fault }, u64, arch.mm.paging.page_fault_handler, 0, 0);
+
// Set up the 8254's (we need 8259 timer to calibrate tsc)
+
pic.init();
+
// Calibrate the TSC against the 8259
+
arch.tsc.calibrate_pit() catch |err| {
+
log.err("Failed to calibrate TSC: {}", .{err});
+
};
+
// Set up everything needed to arm the timer
+
apic.init.initialSetup();
+
}
+
+
// TODO: make this slightly less shit
+
pub fn enter_userspace(entry: u64, arg: u64, stack: u64) noreturn {
+
log.info("usercode64 GDT 0x{x}, userdata64 GDT 0x{x}", .{ arch.structures.gdt.StandardGdt.selectors.user_code, arch.structures.gdt.StandardGdt.selectors.user_data });
+
const cr3 = arch.registers.ControlRegisters.Cr3.read();
+
arch.registers.ControlRegisters.Cr3.write(cr3);
+
asm volatile (
+
\\ push %[userdata64]
+
\\ push %[stack]
+
\\ push $0x202
+
\\ push %[usercode64]
+
\\ push %[entry]
+
\\
+
\\ mov %[userdata64], %%rax
+
\\ mov %%rax, %%es
+
\\ mov %%rax, %%ds
+
\\
+
\\ xor %%rsi, %%rsi
+
\\ xor %%rax, %%rax
+
\\ xor %%rdx, %%rdx
+
\\ xor %%rcx, %%rcx
+
\\ xor %%rbp, %%rbp
+
\\ xor %%rbx, %%rbx
+
\\
+
\\ xor %%r8, %%r8
+
\\ xor %%r9, %%r9
+
\\ xor %%r10, %%r10
+
\\ xor %%r11, %%r11
+
\\ xor %%r12, %%r12
+
\\ xor %%r13, %%r13
+
\\ xor %%r14, %%r14
+
\\ xor %%r15, %%r15
+
\\
+
\\ iretq
+
\\
+
:
+
: [arg] "{rdi}" (arg),
+
[stack] "r" (stack),
+
[entry] "r" (entry),
+
[userdata64] "i" (arch.structures.gdt.StandardGdt.selectors.user_data),
+
[usercode64] "i" (arch.structures.gdt.StandardGdt.selectors.user_code),
+
);
+
unreachable;
+
}
+227 -153
components/ukernel/arch/amd64/mm/paging.zig
···
-
const common = @import("common");
const arch = @import("../root.zig");
+
const common = @import("common");
const std = @import("std");
-
const physToVirt = common.mm.physToHHDM;
+
const Cr3 = arch.registers.ControlRegisters.Cr3;
+
const Cr4 = arch.registers.ControlRegisters.Cr4;
+
const idt = arch.interrupts.idt;
const Perms = common.mm.paging.Perms;
-
-
pub const page_sizes = [_]usize{
-
0x1000, // 4K
-
0x200000, // 2M
-
0x40000000, // 1G
-
0x8000000000, // 512G
-
0x1000000000000, // 256T
-
};
pub const PageTable = extern struct {
entries: [512]Entry,
···
};
};
-
fn extract_index_from_vaddr(vaddr: u64, level: u6) u9 {
-
const shamt = 12 + level * 9;
-
return @truncate(vaddr >> shamt);
+
pub const MemoryType = enum {
+
DeviceUncacheable,
+
DeviceWriteCombining,
+
MemoryWritethrough,
+
MemoryWriteBack,
+
};
+
+
pub fn detect_5level() bool {
+
const bits: u64 = 1 << 12;
+
return Cr4.read() & bits != 0;
}
-
pub const TypedPTE = union(common.mm.paging.PTEType) {
-
Mapping: MappingHandle,
-
Table: TableHandle,
-
Empty,
+
pub const Context = struct {
+
cr3_val: u64,
+
level5: bool,
const Self = @This();
+
pub fn apply(self: *const Self) void {
+
// NX Enable
+
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
+
const efer_val = IA32_EFER.read() | (0b1 << 11);
+
IA32_EFER.write(efer_val);
-
pub fn decode(pte: *PageTable.Entry, level: u3) Self {
+
// Set the level 5 bit accordingly
+
const cr4 = Cr4.read();
+
const level5mask: u64 = 1 << 12;
+
Cr4.write(if (self.level5) cr4 | level5mask else cr4 & ~level5mask);
+
+
Cr3.write(self.cr3_val);
+
}
+
+
pub fn get_current() Context {
+
return .{
+
.cr3_val = Cr3.read(),
+
.level5 = detect_5level(),
+
};
+
}
+
+
pub fn make_user() !Context {
+
// Make a new root page table
+
const user_root_paddr = try make_page_table();
+
const user_root = common.mm.physToHHDM(*PageTable, user_root_paddr);
+
// Copy the entire higher half entries
+
const higher_half = common.init_data.kernel_paging_ctx.root_table(0).get_children();
+
@memcpy(user_root.entries[256..], higher_half[256..]);
+
return .{
+
.cr3_val = user_root_paddr,
+
.level5 = common.init_data.kernel_paging_ctx.level5,
+
};
+
}
+
+
pub fn can_map_at(_: *const Self, level: u3) bool {
+
return level < 2;
+
}
+
+
// We need the parameter because aarch64 has 2 root page tables
+
pub fn root_table(self: *Self, _: u64) TableHandle {
+
return .{
+
// Mask out the cr3 value
+
.paddr = self.cr3_val & 0xFFFFFFFF_FFFFF000,
+
.level = if (self.level5) 5 else 4,
+
.context = self,
+
.perms = .{
+
.x = true,
+
.w = true,
+
.u = true,
+
},
+
.underlying = null,
+
};
+
}
+
+
pub fn decode(self: *Self, pte: *PageTable.Entry, level: u3) SomePteHandle {
if (!pte.present) {
return .Empty;
}
if (!pte.huge and level != 0) {
-
return .{ .Table = decode_table(pte, level) };
+
return .{ .Table = self.parse_table(pte, level) };
}
-
return .{ .Mapping = decode_mapping(pte, level) };
+
return .{ .Mapping = self.parse_mapping(pte, level) };
}
-
pub fn decode_table(pte: *PageTable.Entry, level: u3) TableHandle {
+
pub fn parse_mapping(self: *Self, pte: *PageTable.Entry, level: u3) MappingHandle {
+
const memory_type = self.decode_memory_type(pte, level);
return .{
-
.phys_addr = pte.getAddr(),
+
.context = self,
+
.paddr = pte.getAddr(),
.level = level,
+
.memory_type = memory_type,
.underlying = pte,
.perms = .{
-
.writable = pte.writable,
-
.executable = !pte.nx,
-
.userspace_accessible = pte.user_accessible,
+
.w = pte.writable,
+
.x = !pte.nx,
+
.u = pte.user_accessible,
},
};
}
-
pub fn decode_mapping(pte: *PageTable.Entry, level: u3) MappingHandle {
+
pub fn decode_memory_type(_: *Self, pte: *PageTable.Entry, _: u3) ?MemoryType {
+
return switch (pte.disable_cache) {
+
true => .DeviceUncacheable,
+
false => switch (pte.write_through) {
+
true => .MemoryWritethrough,
+
false => .MemoryWriteBack,
+
},
+
};
+
}
+
+
pub fn encode_memory_type(_: *Self, pte: *PageTable.Entry, mapping_handle: MappingHandle) void {
+
switch (mapping_handle.memory_type.?) {
+
.MemoryWritethrough => pte.write_through = true,
+
.DeviceUncacheable => pte.disable_cache = true,
+
.MemoryWriteBack => {},
+
else => @panic("bad memory type"),
+
}
+
}
+
+
pub fn parse_table(self: *Self, pte: *PageTable.Entry, level: u3) TableHandle {
return .{
-
.phys_addr = pte.getAddr(),
+
.context = self,
+
.paddr = pte.getAddr(),
.level = level,
-
// TODO: memory types
-
.memory_type = null,
.underlying = pte,
.perms = .{
-
.writable = pte.writable,
-
.executable = !pte.nx,
-
.userspace_accessible = pte.user_accessible,
+
.w = pte.writable,
+
.x = !pte.nx,
+
.u = pte.user_accessible,
},
};
}
+
+
pub fn encode_mapping(self: *Self, mapping_handle: MappingHandle) PageTable.Entry {
+
var pte = std.mem.zeroes(PageTable.Entry);
+
pte.setAddr(mapping_handle.paddr);
+
pte.present = true;
+
if (mapping_handle.level != 0) {
+
pte.huge = true;
+
}
+
+
pte.writable = mapping_handle.perms.w;
+
pte.user_accessible = mapping_handle.perms.u;
+
pte.nx = !mapping_handle.perms.x;
+
+
self.encode_memory_type(&pte, mapping_handle);
+
return pte;
+
}
+
+
pub fn encode_table(_: *Self, table_handle: TableHandle) PageTable.Entry {
+
var pte = std.mem.zeroes(PageTable.Entry);
+
pte.writable = table_handle.perms.w;
+
pte.user_accessible = table_handle.perms.u;
+
pte.nx = !table_handle.perms.x;
+
pte.setAddr(table_handle.paddr);
+
+
pte.present = true;
+
pte.huge = false;
+
+
return pte;
+
}
+
+
pub fn invalidate(_: *const Self, vaddr: u64) void {
+
asm volatile (
+
\\ invlpg (%[vaddr])
+
:
+
: [vaddr] "r" (vaddr),
+
: .{ .memory = true });
+
}
+
+
pub fn domain(_: *const Self, level: u3, vaddr: u64) StupidSlice {
+
return .{
+
.ptr = vaddr & ~(page_sizes[level] - 1),
+
.len = page_sizes[level],
+
};
+
}
+
+
pub fn virt_to_phys(context: *Context, vaddr: usize) ?usize {
+
const root = context.root_table(0).get_children();
+
const indexes = [_]usize{
+
(vaddr >> 39) & 0x1FF,
+
(vaddr >> 30) & 0x1FF,
+
(vaddr >> 21) & 0x1FF,
+
(vaddr >> 12) & 0x1FF,
+
};
+
var pte_ptr = &root[indexes[0]];
+
std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
+
for (0..3) |i| {
+
if (!pte_ptr.present) {
+
return null;
+
}
+
const next_page_table = common.mm.physToHHDM(*PageTable, pte_ptr.getAddr());
+
pte_ptr = &next_page_table.entries[indexes[i + 1]];
+
std.log.warn("{*}: {any}, addr 0x{x}", .{ pte_ptr, pte_ptr, pte_ptr.getAddr() });
+
}
+
return pte_ptr.getAddr() + (vaddr & 0xFFF);
+
}
};
-
pub const MappingHandle = struct {
-
phys_addr: usize,
+
fn idx_from_level(vaddr: u64, level: u6) u9 {
+
const shamt = 12 + level * 9;
+
return @truncate(vaddr >> shamt);
+
}
+
+
pub fn make_page_table() !usize {
+
const page_size = std.heap.pageSize();
+
const paddr = try common.init_data.bootmem.allocPhys(page_size);
+
const pt_ptr = common.mm.physToHHDM([*]u8, paddr);
+
@memset(pt_ptr[0..page_size], 0);
+
return paddr;
+
}
+
+
pub const page_sizes = [_]usize{
+
0x1000,
+
0x200000,
+
0x40000000,
+
0x8000000000,
+
0x1000000000000,
+
};
+
+
const MappingHandle = struct {
+
paddr: u64,
level: u3,
memory_type: ?MemoryType,
+
context: *Context,
perms: Perms,
underlying: *PageTable.Entry,
};
pub const TableHandle = struct {
-
phys_addr: usize,
+
paddr: u64,
level: u3,
+
context: *Context,
perms: Perms,
underlying: ?*PageTable.Entry,
const Self = @This();
-
-
// Get the child entries of this page table
pub fn get_children(self: *const Self) []PageTable.Entry {
-
const page_table = physToVirt(*PageTable, self.phys_addr);
-
return page_table.entries[0..];
+
const pt = common.mm.physToHHDM(*PageTable, self.paddr);
+
return pt.entries[0..];
}
-
// Get children from the position holding the table and on
-
pub fn skip_to(self: *const Self, vaddr: usize) []PageTable.Entry {
-
return self.get_children()[extract_index_from_vaddr(vaddr, self.level - 1)..];
+
pub fn skip_to(self: *const Self, vaddr: u64) []PageTable.Entry {
+
return self.get_children()[idx_from_level(vaddr, self.level - 1)..];
}
-
// Decode child table given an entry
-
pub fn decode_child(self: *const Self, pte: *PageTable.Entry) TypedPTE {
-
return TypedPTE.decode(pte, self.level - 1);
+
pub fn decode_child(self: *const Self, pte: *PageTable.Entry) SomePteHandle {
+
return self.context.decode(pte, self.level - 1);
}
pub fn addPerms(self: *const Self, perms: Perms) void {
-
if (perms.executable) {
+
if (perms.x) {
self.underlying.?.nx = false;
}
-
if (perms.writable) {
+
if (perms.w) {
self.underlying.?.writable = true;
}
-
if (perms.userspace_accessible) {
+
if (perms.u) {
self.underlying.?.user_accessible = true;
}
}
-
pub fn child_domain(self: *const Self, vaddr: usize) UntypedSlice {
-
return domain(vaddr, self.level - 1);
-
}
-
pub fn make_child_table(self: *const Self, pte: *PageTable.Entry, perms: Perms) !TableHandle {
const pmem = try make_page_table();
const result: TableHandle = .{
-
.phys_addr = pmem,
+
.paddr = pmem,
+
.context = self.context,
.level = self.level - 1,
.perms = perms,
.underlying = pte,
};
-
pte.* = encode_table(result);
+
pte.* = self.context.encode_table(result);
return result;
}
-
pub fn make_child_mapping(
-
self: *const Self,
-
pte: *PageTable.Entry,
-
paddr: ?usize,
-
perms: Perms,
-
memory_type: MemoryType,
-
) !MappingHandle {
+
pub fn make_child_mapping(self: *const Self, pte: *PageTable.Entry, paddr: ?u64, perms: Perms, memory_type: MemoryType) !MappingHandle {
const page_size = page_sizes[self.level - 1];
const pmem = paddr orelse try common.init_data.bootmem.allocPhys(page_size);
const result: MappingHandle = .{
.level = self.level - 1,
.memory_type = memory_type,
+
.context = self.context,
.perms = perms,
.underlying = pte,
-
.phys_addr = pmem,
+
.paddr = pmem,
};
-
pte.* = encode_mapping(result);
+
pte.* = self.context.encode_mapping(result);
return result;
}
-
};
-
pub fn root_table(vaddr: usize) TableHandle {
-
_ = vaddr;
-
const cr3_val = arch.registers.ControlRegisters.Cr3.read() & 0xFFFF_FFFF_FFFF_F000;
-
return .{
-
.phys_addr = cr3_val,
-
// TODO: detect and support 5 level paging!
-
.level = 4,
-
.perms = .{
-
.executable = true,
-
.writable = true,
-
},
-
.underlying = null,
-
};
-
}
-
-
fn encode_table(pte_handle: TableHandle) PageTable.Entry {
-
var pte = std.mem.zeroes(PageTable.Entry);
-
-
pte.setAddr(pte_handle.phys_addr);
-
pte.writable = pte_handle.perms.writable;
-
pte.user_accessible = pte_handle.perms.userspace_accessible;
-
pte.nx = !pte_handle.perms.executable;
-
pte.present = true;
-
pte.huge = false;
-
-
return pte;
-
}
-
-
fn encode_mapping(pte_handle: MappingHandle) PageTable.Entry {
-
var pte = std.mem.zeroes(PageTable.Entry);
-
-
pte.setAddr(pte_handle.phys_addr);
-
pte.present = true;
-
-
if (pte_handle.level != 0) {
-
pte.huge = true;
+
pub fn child_domain(self: *const Self, vaddr: u64) StupidSlice {
+
return self.context.domain(self.level - 1, vaddr);
}
-
-
pte.writable = pte_handle.perms.writable;
-
pte.user_accessible = pte_handle.perms.userspace_accessible;
-
pte.nx = !pte_handle.perms.executable;
-
-
encode_memory_type(&pte, pte_handle);
-
-
return pte;
-
}
+
};
-
fn encode_memory_type(pte: *PageTable.Entry, pte_handle: MappingHandle) void {
-
const mt = pte_handle.memory_type orelse @panic("Unknown memory type");
+
pub const SomePteHandle = union(common.mm.paging.PTEType) {
+
Mapping: MappingHandle,
+
Table: TableHandle,
+
Empty,
+
};
-
// TODO: Page Attribute Table
-
switch (mt) {
-
.MemoryWritethrough => pte.write_through = true,
-
.DeviceUncacheable => pte.disable_cache = true,
-
.MemoryWriteBack => {},
-
else => @panic("Cannot set memory type"),
-
}
-
}
-
-
/// Returns physical address
-
fn make_page_table() !usize {
-
const pt_phys = try common.init_data.bootmem.allocPhys(std.heap.pageSize());
-
const pt = physToVirt([*]u8, pt_phys);
-
@memset(pt[0..std.heap.pageSize()], 0x00);
-
return pt_phys;
-
}
-
-
pub fn invalidate(vaddr: u64) void {
-
asm volatile (
-
\\ invlpg (%[vaddr])
-
:
-
: [vaddr] "r" (vaddr),
-
: .{ .memory = true });
-
}
-
-
const UntypedSlice = struct {
+
pub const StupidSlice = struct {
len: usize,
ptr: usize,
};
-
pub fn domain(vaddr: usize, level: u3) UntypedSlice {
-
return .{
-
.len = page_sizes[level],
-
.ptr = vaddr & ~(page_sizes[level] - 1),
-
};
-
}
-
-
pub const MemoryType = enum {
-
DeviceUncacheable,
-
DeviceWriteCombining,
-
MemoryWritethrough,
-
MemoryWriteBack,
-
};
-
-
pub fn can_map_at(level: u3) bool {
-
return level < 2;
+
pub fn page_fault_handler(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
std.log.err("Page Fault @ 0x{x}, dying...", .{stack_frame.rip});
+
arch.interrupts.print_regs(stack_frame.normalize());
+
std.log.err("Error CR2: 0x{x:0>16}, Error Code: 0x{x:0>16}", .{ arch.registers.ControlRegisters.Cr2.read(), stack_frame.error_code });
+
arch.instructions.die();
}
+21 -23
components/ukernel/arch/amd64/root.zig
···
const common = @import("common");
const std = @import("std");
-
fn pageSize() usize {
-
return 4 << 10;
-
}
-
-
pub const std_options: std.Options = .{
-
.logFn = common.aux.logFn,
-
.page_size_min = 4 << 10,
-
.page_size_max = 4 << 10,
-
.queryPageSize = pageSize,
+
// needed by std options
+
pub const page_size = struct {
+
pub const min = 4 << 10;
+
pub const max = 4 << 10;
+
pub fn get() usize {
+
return 4 << 10;
+
}
};
-
pub const panic = std.debug.FullPanic(common.aux.panic);
pub var per_cpu_init_data: PerCpuInitData = .{};
const PerCpuInitData = struct {
const StandardGdt = structures.gdt.StandardGdt;
const Tss = structures.tss.Tss;
-
const Idt = structures.Idt;
gdt_buf: []StandardGdt = undefined,
tss_buf: []Tss = undefined,
-
idt: *Idt = undefined,
+
// Physical ptr
+
stack_buf: usize = undefined,
+
+
const stack_size = std.heap.page_size_max;
const Self = @This();
pub fn init(self: *Self, cpu_count: u64) void {
-
// 1. Allocate an IDT
-
const idt_addr = common.init_data.bootmem.allocMem(@sizeOf(Idt)) catch |err| {
-
std.log.err("init PerCpuInitData: IDT alloc failed: {}", .{err});
-
@panic("rip bozo");
+
// 1. Allocate stack space for every core
+
self.stack_buf = common.init_data.bootmem.allocPhys(stack_size * cpu_count) catch |err| {
+
std.log.err("init PerCpuInitData: failed to allocate stack! {}", .{err});
+
@panic("stack_buf");
};
-
self.idt = @ptrFromInt(idt_addr);
// 2. Allocate space for GDT and TSS data
const gdt_size = @sizeOf(StandardGdt);
···
const total_required_size = gdt_size * cpu_count + tss_size * cpu_count;
const buf: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(total_required_size) catch |err| {
std.log.err("init PerCpuInitData: GDT/TSS alloc failed: {}", .{err});
-
@panic("rip bozo");
+
@panic("gdt_tss_buf");
});
// 3. Transmute and fill out the structure
···
self.gdt_buf = gdt_buf[0..cpu_count];
self.tss_buf = tss_buf[0..cpu_count];
}
-
};
-
comptime {
-
// Entry point (_start)
-
@export(&boot.bsp_init, .{ .name = "_start", .linkage = .strong });
-
}
+
// returns a pointer to the TOP of the stack!
+
pub fn getStackPhys(self: *Self, core_num: usize) usize {
+
return self.stack_buf + (core_num + 1) * stack_size;
+
}
+
};
-170
components/ukernel/arch/amd64/structures/Idt.zig
···
-
//! The entire Interrupt Descriptor Table (IDT) structure for AMD64,
-
//! including all the necessary ISR entries. Each of the defined
-
//! ISRs is meant for a specific type of exception, while the
-
//! array at the end of the IDT can be used for whatever is necessary.
-
const std = @import("std");
-
const arch = @import("../root.zig");
-
const StandardGdt = arch.structures.gdt.StandardGdt;
-
-
/// Faulty division (mostly divide by zero)
-
divide_error: Entry(.handler),
-
/// AMD64 Debug Exception, either a fault or a trap
-
debug_exception: Entry(.handler),
-
/// Non Maskable Interrupt
-
non_maskable_interrupt: Entry(.handler),
-
/// Breakpoint (int3) trap
-
breakpoint: Entry(.handler),
-
/// Overflow trap (INTO instruction)
-
overflow: Entry(.handler),
-
/// Bound Range Exception (BOUND instruction)
-
bound_range_exceeded: Entry(.handler),
-
/// Invalid Opcode Exception
-
invalid_opcode: Entry(.handler),
-
/// Device Not Available (FPU instructions when FPU disabled)
-
device_not_available: Entry(.handler),
-
/// Double Fault Exception
-
double_fault: Entry(.abort_with_err_code),
-
_coprocessor_segment_overrun: Entry(.handler),
-
/// Invalid TSS: bad segment selector
-
invalid_tss: Entry(.handler_with_err_code),
-
/// Segment Not Present
-
segment_not_present: Entry(.handler_with_err_code),
-
/// Stack Segment Fault
-
stack_segment_fault: Entry(.handler_with_err_code),
-
/// General Protection Fault
-
general_protection_fault: Entry(.handler_with_err_code),
-
/// Page Fault
-
page_fault: Entry(.handler_with_err_code),
-
-
_reserved1: Entry(.handler),
-
/// x87 Floating Point Exception
-
x87_floating_point: Entry(.handler),
-
/// Alignment Check Exception
-
alignment_check: Entry(.handler_with_err_code),
-
/// Machine Check Exception (MCE)
-
machine_check: Entry(.abort),
-
/// SIMD Floating Point Exception
-
simd_floating_point: Entry(.handler),
-
/// Virtualization Exception
-
virtualization: Entry(.handler),
-
/// Control Protection Exception
-
control_protection: Entry(.handler_with_err_code),
-
_reserved2: [10]Entry(.handler),
-
/// User Accessible Interrupts
-
interrupts: [256 - 32]Entry(.handler),
-
-
/// An ISR Entry in the IDT
-
pub const EntryType = union(enum) {
-
abort: void,
-
abort_with_err_code: void,
-
handler: void,
-
handler_with_err_code: void,
-
handler_with_custom_err_code: type,
-
};
-
pub fn Entry(comptime entry_type: EntryType) type {
-
const return_type = switch (entry_type) {
-
.abort, .abort_with_err_code => noreturn,
-
.handler, .handler_with_err_code, .handler_with_custom_err_code => void,
-
};
-
const params: []const std.builtin.Type.Fn.Param = switch (entry_type) {
-
.handler, .abort => &.{
-
// Interrupt stack frame
-
.{ .is_generic = false, .is_noalias = false, .type = *InterruptStackFrame },
-
},
-
.handler_with_err_code, .abort_with_err_code => &.{
-
// Interrupt stack frame
-
.{ .is_generic = false, .is_noalias = false, .type = *InterruptStackFrame },
-
// Error code
-
.{ .is_generic = false, .is_noalias = false, .type = u64 },
-
},
-
.handler_with_custom_err_code => |err_code_type| &.{
-
// Interrupt stack frame
-
.{ .is_generic = false, .is_noalias = false, .type = *InterruptStackFrame },
-
// Custom Error code
-
.{ .is_generic = false, .is_noalias = false, .type = err_code_type },
-
},
-
};
-
const FunctionTypeInfo: std.builtin.Type = .{
-
.@"fn" = .{
-
.calling_convention = .{ .x86_64_interrupt = .{} },
-
.is_generic = false,
-
.is_var_args = false,
-
.return_type = return_type,
-
.params = params,
-
},
-
};
-
-
// The actual IDT entry structure
-
return extern struct {
-
func_low: u16,
-
gdt_selector: u16,
-
options: Options,
-
func_mid: u16,
-
func_high: u32,
-
_reserved: u32 = 0,
-
-
const FuncType = @Type(FunctionTypeInfo);
-
-
pub const Options = packed struct(u16) {
-
/// Interrupt Stack Table Index
-
ist_index: u3,
-
_reserved: u5 = 0,
-
disable_interrupts: bool,
-
must_be_one: u3 = 0b111,
-
must_be_zero: u1 = 0,
-
/// Descriptor Privilege Level
-
dpl: u2,
-
present: bool,
-
};
-
-
const Self = @This();
-
-
pub fn installHandler(self: *Self, func: *const FuncType) void {
-
// Fetch the Code Segment
-
const func_ptr = @intFromPtr(func);
-
self.* = .{
-
// Set the function pointer
-
.func_low = @truncate(func_ptr & 0xFFFF),
-
.func_mid = @truncate((func_ptr >> 16) & 0xFFFF),
-
.func_high = @truncate((func_ptr >> 32) & 0xFFFF_FFFF),
-
.gdt_selector = StandardGdt.selectors.kernel_code,
-
.options = .{
-
// No Interrupt Stack Table yet
-
.ist_index = 0,
-
// Mask interrupts while running ISR handler
-
.disable_interrupts = true,
-
// Ring 3 Minimum privilege level
-
.dpl = 3,
-
// Mark as present
-
.present = true,
-
},
-
};
-
}
-
};
-
}
-
-
/// IDT Register
-
pub const Idtr = packed struct(u80) {
-
limit: u16,
-
addr: u64,
-
-
/// Load the IDT Register
-
pub fn load(self: *const Idtr) void {
-
asm volatile ("lidt (%[idtr_addr])"
-
:
-
: [idtr_addr] "r" (self),
-
);
-
}
-
};
-
-
/// Interrupt Stack Frame
-
/// TODO: maybe move this somewhere else
-
pub const InterruptStackFrame = extern struct {
-
instruction_pointer: u64,
-
code_segment: u16,
-
_reserved1: [6]u8,
-
cpu_flags: u64,
-
stack_pointer: u64,
-
stack_segment: u16,
-
_reserved2: [6]u8,
-
};
+24 -1
components/ukernel/arch/amd64/structures/root.zig
···
pub const gdt = @import("gdt.zig");
pub const tss = @import("tss.zig");
-
pub const Idt = @import("Idt.zig");
+
const arch = @import("../root.zig");
+
const common = @import("common");
+
const Queue = @import("Queue");
+
+
// Uses an intrusive queue
+
pub const Task = struct {
+
// Saved Registers
+
regs: arch.interrupts.idt.SavedRegisters align(8),
+
// Address Space context
+
cr3_val: u64,
+
// Instruction Pointer
+
rip: u64,
+
// Stack Pointer
+
rsp: u64,
+
// Next task basically
+
node: Queue.Node = .{},
+
+
pub fn getPagingContext(self: Task) arch.mm.paging.Context {
+
return .{
+
.cr3_val = self.cr3_val,
+
.level5 = common.init_data.kernel_paging_ctx.level5,
+
};
+
}
+
};
+4
components/ukernel/arch/amd64/tsc.zig
···
const arch = @import("root.zig");
+
const std = @import("std");
const out = arch.port.out;
const in = arch.port.in;
+
const log = std.log.scoped(.tsc);
pub var tsc_khz: usize = 0;
···
if (pollcnt < 1000) return error.PitError;
tsc_khz = (end - start) / 50;
+
+
log.debug("{} MHz", .{tsc_khz / 1000});
}
/// Delay for a set amount of ms using crappy polling
+14 -20
components/ukernel/build.zig
···
const target = b.resolveTargetQuery(target_query);
const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseSafe });
-
const arch_module = b.createModule(.{
-
.root_source_file = b.path(arch_root_path),
+
const common_mod = b.createModule(.{
+
.root_source_file = b.path("common/root.zig"),
.target = target,
.optimize = optimize,
.code_model = code_model,
});
+
const arch_module = b.createModule(.{
+
.root_source_file = b.path(arch_root_path),
+
});
switch (arch) {
.amd64 => {
arch_module.addAssemblyFile(b.path("arch/amd64/asm/traps.S"));
···
else => {},
}
-
const limine_dep = b.dependency("limine", .{
-
.api_revision = 3,
-
});
-
const spinlock_dep = b.dependency("spinlock", .{});
-
const console_dep = b.dependency("console", .{});
-
-
const limine_mod = limine_dep.module("limine");
-
const console_mod = console_dep.module("console");
-
const spinlock_mod = spinlock_dep.module("spinlock");
-
-
const common_mod = b.createModule(.{
-
.root_source_file = b.path("common/root.zig"),
-
});
+
const spinlock_mod = b.dependency("spinlock", .{}).module("spinlock");
+
const limine_mod = b.dependency("limine", .{ .api_revision = 3 }).module("limine");
+
const console_mod = b.dependency("console", .{}).module("console");
+
const queue_mod = b.dependency("Queue", .{}).module("Queue");
arch_module.addImport("limine", limine_mod);
arch_module.addImport("console", console_mod);
arch_module.addImport("common", common_mod);
-
-
console_mod.addImport("limine", limine_mod);
+
arch_module.addImport("Queue", queue_mod);
common_mod.addImport("arch", arch_module);
-
common_mod.addImport("console", console_mod);
common_mod.addImport("spinlock", spinlock_mod);
+
common_mod.addImport("console", console_mod);
+
common_mod.addImport("Queue", queue_mod);
const kernel = b.addExecutable(.{
.name = "ukernel",
-
.root_module = arch_module,
+
.root_module = common_mod,
// TODO: remove when x86 backend is less broken with removing CPU features
.use_llvm = true,
});
kernel.pie = false;
-
kernel.want_lto = true;
+
kernel.want_lto = false;
kernel.setLinkerScript(b.path(linker_script_path));
b.installArtifact(kernel);
}
+5 -1
components/ukernel/build.zig.zon
···
.dependencies = .{
.limine = .{ .path = "deps/limine-zig" },
.spinlock = .{ .path = "deps/spinlock" },
+
.build_helpers = .{ .path = "../build_helpers" },
.console = .{ .path = "deps/console" },
-
.build_helpers = .{ .path = "../build_helpers" },
+
.Queue = .{
+
.url = "git+https://tangled.org/@sydney.blue/Queue.zig?ref=dev#6c0760e8a233c1d59554a40a87f0ef293a9697f3",
+
.hash = "Queue-0.0.0-upnEfhEPAADNV4Dvs3DVCRSnOh-BrhgsRR6scaE2qTIa",
+
},
},
.paths = .{
"build.zig",
+17 -7
components/ukernel/common/aux.zig
···
console: ?console.Console = null,
framebuffer: ?console.Framebuffer = null,
hardware_description: HardwareDescription = .none,
-
root_task: []align(4096) u8 = undefined,
+
root_task_elf: []align(4096) u8 = undefined,
hhdm_slide: usize = 0,
+
kernel_paging_ctx: arch.mm.paging.Context = undefined,
};
+
pub fn initConsole() void {
+
const fb = common.init_data.framebuffer.?;
+
// Create a canvas for the console to render to
+
const canvas: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(fb.width * fb.height * fb.bypp) catch @panic("Couldn't allocate a canvas"));
+
@memset(canvas[0 .. fb.width * fb.height * fb.bypp], 0);
+
+
// Set the console instance
+
common.init_data.console = console.Console.init(fb, canvas);
+
}
+
var stdout_lock: spinlock.Spinlock = .{};
pub fn logFn(
···
if (common.init_data.console == null) return;
// Use the same naming as the default logger
-
const level, const color = switch (message_level) {
+
const level, const color: u32 = switch (message_level) {
.debug => .{ "D", 0x3bcf1d },
.err => .{ "E", 0xff0000 },
.info => .{ "I", 0x00bbbb },
···
stdout_lock.lock();
defer stdout_lock.unlock();
-
const cons = &common.init_data.console.?;
-
-
cons.setColor(color, 0);
-
cons.writer().print(prefix ++ format ++ "\n", args) catch return;
+
common.init_data.console.?.setColor(color, 0);
+
// No buffering for now
+
var writer = console.Console.Writer.init(&common.init_data.console.?, &.{});
+
writer.interface.print(prefix ++ format ++ "\n", args) catch return;
}
}
pub fn panic(msg: []const u8, first_trace_addr: ?usize) noreturn {
_ = first_trace_addr;
const log = std.log.scoped(.panic);
-
common.init_data.console.?.setColor(0xff0000, 0);
log.err("PANIC: {s}", .{msg});
var it = std.debug.StackIterator.init(@returnAddress(), @frameAddress());
defer it.deinit();
+9 -8
components/ukernel/common/loader.zig
···
const common = @import("root.zig");
+
const arch = @import("arch");
const paging = common.mm.paging;
const std = @import("std");
const elf = std.elf;
const log = std.log.scoped(.elf_loader);
// Load root task, return the entry point
-
pub fn loadRootTask() !usize {
-
const root_task = common.init_data.root_task;
+
pub fn loadElf(context: *arch.mm.paging.Context, task_slice: []align(4096) u8) !usize {
const hdr = blk: {
-
const hdr: *elf.Elf64_Ehdr = @ptrCast(root_task);
+
const hdr: *elf.Elf64_Ehdr = @ptrCast(task_slice);
break :blk elf.Header.init(hdr.*, .little);
};
-
var iter = hdr.iterateProgramHeadersBuffer(root_task);
+
var iter = hdr.iterateProgramHeadersBuffer(task_slice);
while (try iter.next()) |entry| {
if ((entry.p_type != elf.PT_LOAD) or (entry.p_memsz == 0)) continue;
···
.size = memsz_pages,
.memory_type = .MemoryWriteBack,
.perms = .{
-
.executable = entry.p_flags & elf.PF_X > 0,
-
.writable = entry.p_flags & elf.PF_W > 0,
-
.userspace_accessible = true,
+
.x = entry.p_flags & elf.PF_X > 0,
+
.w = entry.p_flags & elf.PF_W > 0,
+
.u = true,
},
+
.context = context,
});
// 2. Copy filesz bytes from offset to this new page
const dst = common.mm.physToHHDM([*]u8, page_backing + vaddr_shift);
const dst_slice = dst[0..entry.p_filesz];
-
const src_slice = root_task[entry.p_offset..][0..entry.p_filesz];
+
const src_slice = task_slice[entry.p_offset..][0..entry.p_filesz];
@memcpy(dst_slice, src_slice);
// 3. Add memsz - filesz zeroes
-1
components/ukernel/common/mm/bootmem.zig
···
// Finally, initialize the global bootmem
common.init_data.bootmem.initialize(bootmem_struct);
-
common.init_data.bootmem.debugInfo();
}
+25 -17
components/ukernel/common/mm/paging.zig
···
const arch = @import("arch");
const std = @import("std");
+
const common = @import("../root.zig");
const TableHandle = arch.mm.paging.TableHandle;
const MemoryType = arch.mm.paging.MemoryType;
+
const Context = arch.mm.paging.Context;
pub const Perms = struct {
    /// Writable
    w: bool,
    /// Executable
    x: bool,
    /// Userspace Accessible
    u: bool = false,

    const Self = @This();

    /// Verify that the current permissions are a superset of the provided ones
    pub fn allows(self: Self, other: Self) bool {
        if (other.w and !self.w) return false;
        if (other.x and !self.x) return false;
        if (other.u and !self.u) return false;
        return true;
    }

    /// OR two permissions
    pub fn addPerms(self: Self, other: Self) Self {
        return .{
            .w = self.w or other.w,
            .x = self.x or other.x,
            .u = self.u or other.u,
        };
    }
};
···
size: usize,
perms: Perms,
memory_type: MemoryType,
+
context: *Context = &common.init_data.kernel_paging_ctx,
}) !void {
-
const root = arch.mm.paging.root_table(args.vaddr);
+
const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
var paddr = args.paddr;
var size = args.size;
-
try mapPageImpl(&vaddr, &paddr, &size, root, args.perms, args.memory_type);
+
try mapPageImpl(&vaddr, &paddr, &size, root, args.perms, args.memory_type, args.context);
}
pub fn map(args: struct {
···
size: usize,
perms: Perms,
memory_type: MemoryType,
+
context: *Context = &common.init_data.kernel_paging_ctx,
}) !void {
-
const root = arch.mm.paging.root_table(args.vaddr);
+
const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
var size = args.size;
-
try mapPageImpl(&vaddr, null, &size, root, args.perms, args.memory_type);
+
try mapPageImpl(&vaddr, null, &size, root, args.perms, args.memory_type, args.context);
}
fn mapPageImpl(
···
table: TableHandle,
perms: Perms,
memory_type: MemoryType,
+
context: *Context,
) !void {
// 1. Get slice of every child from the target forwards
const children = table.skip_to(vaddr.*);
···
switch (table.decode_child(child)) {
.Mapping => return error.AlreadyPresent,
.Table => |*tbl| {
-
try mapPageImpl(vaddr, paddr, size, tbl.*, perms, memory_type);
+
try mapPageImpl(vaddr, paddr, size, tbl.*, perms, memory_type, context);
if (!tbl.perms.allows(perms)) {
tbl.addPerms(perms);
-
arch.mm.paging.invalidate(vaddr.*);
+
context.invalidate(vaddr.*);
}
},
.Empty => {
const domain = table.child_domain(vaddr.*);
-
if (domain.ptr == vaddr.* and domain.len <= size.* and arch.mm.paging.can_map_at(table.level - 1) and is_aligned(vaddr.*, paddr, table.level - 1)) {
+
if (domain.ptr == vaddr.* and domain.len <= size.* and context.can_map_at(table.level - 1) and is_aligned(vaddr.*, paddr, table.level - 1)) {
// Make child mapping etc
_ = try table.make_child_mapping(child, if (paddr) |p| p.* else null, perms, memory_type);
const step = domain.len;
···
}
} else {
const tbl = try table.make_child_table(child, perms);
-
try mapPageImpl(vaddr, paddr, size, tbl, perms, memory_type);
+
try mapPageImpl(vaddr, paddr, size, tbl, perms, memory_type, context);
}
},
}
+80 -1
components/ukernel/common/root.zig
···
pub const aux = @import("aux.zig");
pub const mm = @import("mm/root.zig");
-
pub const loadRootTask = loader.loadRootTask;
+
pub const scheduler = @import("scheduler.zig");
+
pub const loadElf = loader.loadElf;
+
const arch = @import("arch");
+
const std = @import("std");
const loader = @import("loader.zig");
// Arch init must set up appropriate fields!
pub var init_data: aux.InitState = .{};
+
+
// Generic bsp init: bring the machine up, load the boot-attached tasks,
// then start scheduling. Exported as _start; never returns.
// Order matters: bootmem must exist before the console canvas can be
// allocated, and interrupts must be set up before scheduling starts.
pub fn generic_init() callconv(.c) noreturn {
    const log = std.log.scoped(.generic_init);
    // First, do early arch init
    arch.boot.early_init();

    // Now, set up the bootmem and console
    mm.bootmem.init();

    // Now, do the rest of the arch init
    arch.boot.bsp_init();

    // Next, set up the console
    aux.initConsole();

    // Now, set up interrupts
    arch.interrupts.init();
    arch.interrupts.init_syscalls();

    log.info("Loading attached tasks...", .{});
    arch.boot.loadTasks();

    log.info("Dropping to userspace!", .{});

    arch.interrupts.startScheduling();
}
+
+
// Load an ELF image into a fresh user address space and enqueue it as a
// runnable task. `scratch` is the Task storage to initialize (caller owns it).
// Panics on any failure — this runs during boot where there is no recovery.
pub fn loadTask(scratch: *arch.structures.Task, task_slice: []align(4096) u8) void {
    // Virtual placement of the user stack. The stack top (initial rsp) is
    // derived from these so the three values can never drift apart.
    const user_stack_base: usize = 0x7ffe_0000_0000;
    const user_stack_size: usize = 64 * 1024;

    // 1. Create a user address space
    var user_ctx = arch.mm.paging.Context.make_user() catch |err| {
        std.log.err("Failed to make user context! {}", .{err});
        @panic("make_user_ctx");
    };

    // 2. Allocate a user stack (read/write, no-exec, user-accessible)
    mm.paging.map(.{
        .vaddr = user_stack_base,
        .size = user_stack_size,
        .memory_type = .MemoryWriteBack,
        .perms = .{
            .x = false,
            .u = true,
            .w = true,
        },
        .context = &user_ctx,
    }) catch @panic("couldn't map user stack");

    // 3. Map ELF into address space
    const entry = loadElf(&user_ctx, task_slice) catch |err| {
        std.log.err("Couldn't load the root task! {}", .{err});
        @panic("ggz");
    };

    // 4. Add task to scheduler; stack grows down from the top of the mapping.
    scratch.* = .{
        .cr3_val = user_ctx.cr3_val,
        .regs = .default,
        .rip = entry,
        .rsp = user_stack_base + user_stack_size,
    };
    scheduler.pushTask(scratch);
}
+
+
// std options etc.
// Route panics through our handler (aux.panic).
pub const panic = std.debug.FullPanic(aux.panic);
pub const std_options: std.Options = .{
    // Log through the framebuffer console
    .logFn = aux.logFn,
    // Tell std the target's possible page sizes
    .page_size_min = arch.page_size.min,
    .page_size_max = arch.page_size.max,
    .queryPageSize = arch.page_size.get,
};

comptime {
    // Entry point (_start)
    @export(&generic_init, .{ .name = "_start", .linkage = .strong });
}
+16
components/ukernel/common/scheduler.zig
···
+
const std = @import("std");
const arch = @import("arch");
const Queue = @import("Queue");
const Task = arch.structures.Task;

// FIFO run queue of ready tasks.
// NOTE(review): no locking here — appears to assume a single CPU touches the
// queue; confirm before enabling SMP scheduling.
var task_queue: Queue = .{};

/// Append a task to the back of the run queue.
pub fn pushTask(task: *Task) void {
    task_queue.enqueue(&task.node);
}

/// Pop the next ready task, or null when the queue is empty.
pub fn getNextTask() ?*Task {
    if (task_queue.dequeue()) |node| {
        return @fieldParentPtr("node", node);
    }
    return null;
}
+1 -2
components/ukernel/deps/console/build.zig
···
const std = @import("std");

// Expose the console as a plain Zig module rooted at src/root.zig so the
// kernel build can depend on it.
pub fn build(b: *std.Build) void {
    _ = b.addModule("console", .{
        .root_source_file = b.path("src/root.zig"),
    });
}
+4 -9
components/ukernel/deps/console/build.zig.zon
···
.{
.name = .console,
-
.fingerprint = 0x3603cfb6f7920fba,
-
.version = "0.0.1",
+
.version = "0.0.0",
+
.fingerprint = 0x3603cfb621692996, // Changing this has security and trust implications.
.minimum_zig_version = "0.15.1",
-
.dependencies = .{
-
.limine = .{
-
.path = "../limine-zig",
-
},
-
},
+
.dependencies = .{},
.paths = .{
"build.zig",
"build.zig.zon",
-
"console.zig",
-
"psf2.zig",
+
"src",
"fonts",
},
}
-195
components/ukernel/deps/console/console.zig
···
-
const limine = @import("limine");
-
const builtin = @import("builtin");
-
const psf2 = @import("psf2.zig");
-
pub const Font = psf2.Font;
-
const std = @import("std");
-
const fontdata = @embedFile("fonts/spleen-12x24.psf");
-
const are_we_le = builtin.cpu.arch.endian() == .little;
-
-
pub const DefaultFont = Font.new(fontdata) catch unreachable;
-
-
pub const Framebuffer = struct {
-
const Self = @This();
-
address: [*]u8,
-
width: u64,
-
height: u64,
-
pitch: u64,
-
bypp: u16,
-
red_mask_size: u8,
-
red_mask_shift: u8,
-
green_mask_size: u8,
-
green_mask_shift: u8,
-
blue_mask_size: u8,
-
blue_mask_shift: u8,
-
-
pub fn from_limine(fb: *const limine.Framebuffer) Self {
-
return .{
-
.address = @ptrCast(fb.address),
-
.width = fb.width,
-
.height = fb.height,
-
.pitch = fb.pitch,
-
.red_mask_size = fb.red_mask_size,
-
.red_mask_shift = fb.red_mask_shift,
-
.green_mask_size = fb.green_mask_size,
-
.green_mask_shift = fb.green_mask_shift,
-
.blue_mask_size = fb.blue_mask_size,
-
.blue_mask_shift = fb.blue_mask_shift,
-
.bypp = fb.bpp / 8,
-
};
-
}
-
};
-
-
pub const Console = struct {
-
const Self = @This();
-
const Writer = std.io.GenericWriter(*Self, error{}, write);
-
// framebuffer data
-
fb: Framebuffer,
-
// font
-
font: psf2.Font,
-
// state data
-
current_x: u64 = 0,
-
current_y: u64 = 0,
-
fg_color: u32 = 0xFFFFFFFF,
-
bg_color: u32 = 0,
-
-
pub fn from_font(fb: Framebuffer, font: psf2.Font) Self {
-
return .{
-
.fb = fb,
-
.font = font,
-
};
-
}
-
-
// places a character at the given position
-
pub fn putchar(self: *const Self, ch: u8, cx: u64, cy: u64, fg_val: u32, bg_val: u32) void {
-
// convert colors to bytes
-
const fg_bytes: [4]u8 = @bitCast(if (are_we_le) fg_val else @byteSwap(fg_val));
-
const bg_bytes: [4]u8 = @bitCast(if (are_we_le) bg_val else @byteSwap(bg_val));
-
// initial calculations
-
const bytes_per_line = self.font.hdr.bytesPerLine();
-
const mask_shamt: u5 = @intCast(bytes_per_line * 8 - 1);
-
const mask_initial: u32 = @as(u32, 1) << mask_shamt;
-
const glyph = self.font.getGlyph(ch) catch return;
-
-
// find the screen offset for the beignning of the character
-
// add pitch to go to next line...
-
var offset: u64 = (cy * self.font.hdr.height * self.fb.pitch) + (cx * (self.font.hdr.width + 0) * self.fb.bypp);
-
// run for every line
-
var y: u32 = 0;
-
var mask: u32 = 0;
-
while (y < self.font.hdr.height) : (y += 1) {
-
// initialize the mask and the current line
-
mask = mask_initial;
-
-
// get the current line
-
const line_value: u32 = psf2.readIntTo32(glyph[y * bytes_per_line ..][0..bytes_per_line]);
-
var line_offset: u64 = offset;
-
var x: u32 = 0;
-
while (x < self.font.hdr.width) : (x += 1) {
-
// write the pixel value to the correct position of the screen...
-
if (line_value & mask != 0) {
-
@memcpy(self.fb.address[line_offset..][0..self.fb.bypp], fg_bytes[0..]);
-
} else {
-
@memcpy(self.fb.address[line_offset..][0..self.fb.bypp], bg_bytes[0..]);
-
}
-
line_offset += self.fb.bypp;
-
mask >>= 1;
-
}
-
offset += self.fb.pitch;
-
}
-
}
-
-
pub fn putc(self: *Self, ch: u8) void {
-
// input can be \r, \n, or printable.
-
// ignore \r, move down for \n, and print char normally
-
// if \n, check to see if we overrun then scroll
-
// if normal, see if we overrun the end and do newline
-
if (ch == '\r') return;
-
if (ch == '\n') {
-
self.current_x = 0;
-
self.current_y += 1;
-
// go to top if we went below the bottom of the screen
-
if (self.current_y >= self.maxCharsHeight()) {
-
self.scrollUp(1);
-
self.current_y = self.maxCharsHeight() - 1;
-
}
-
return;
-
}
-
self.putchar(ch, self.current_x, self.current_y, self.fg_color, self.bg_color);
-
self.current_x += 1;
-
-
if (self.current_x < self.maxCharsWidth()) return;
-
self.current_x = 0;
-
self.current_y += 1;
-
if (self.current_y >= self.maxCharsHeight()) {
-
self.scrollUp(1);
-
self.current_y = self.maxCharsHeight() - 1;
-
}
-
}
-
-
pub fn puts(self: *Self, msg: []const u8) void {
-
for (msg) |ch| {
-
self.putc(ch);
-
}
-
}
-
-
fn convertColor(self: *const Self, color: u32) u32 {
-
const mult: u32 = blk: {
-
const width: u4 = @truncate(self.fb.red_mask_size);
-
break :blk (@as(u32, 1) << width) - 1;
-
};
-
const div = 255;
-
const red: u32 = (color >> 16) & 0xFF;
-
const green: u32 = (color >> 8) & 0xFF;
-
const blue: u32 = color & 0xFF;
-
-
const red_shift: u5 = @truncate(self.fb.red_mask_shift);
-
const green_shift: u5 = @truncate(self.fb.green_mask_shift);
-
const blue_shift: u5 = @truncate(self.fb.blue_mask_shift);
-
-
return (((red * mult) / div) << red_shift) | (((green * mult) / div) << green_shift) | (((blue * mult) / div) << blue_shift);
-
}
-
-
pub fn setColor(self: *Self, fg: u32, bg: u32) void {
-
self.fg_color = self.convertColor(fg);
-
self.bg_color = self.convertColor(bg);
-
}
-
-
pub fn writer(self: *Self) Writer {
-
return .{ .context = self };
-
}
-
-
pub fn write(self: *Self, buffer: []const u8) !usize {
-
self.puts(buffer);
-
return buffer.len;
-
}
-
-
// scroll the lines of text, without doing anything else.
-
// erase the first line of text, and memcpy the second line and on up to the first
-
pub fn scrollUp(self: *Self, amount: u64) void {
-
const num_lines = self.maxCharsHeight();
-
const h = self.font.hdr.height;
-
if (amount > num_lines) return; // later just clear the entire screen
-
var i: u64 = amount;
-
while (i < num_lines) : (i += 1) {
-
// for each run, erase the previous line and copy the current line up a line.
-
// const curr_line = self.fb.address[i * h * self.fb.pitch .. (i + 1) * h * self.fb.pitch];
-
const curr_line = self.fb.address[i * h * self.fb.pitch ..][0 .. h * self.fb.pitch];
-
const prev_line = self.fb.address[(i - amount) * h * self.fb.pitch ..][0 .. h * self.fb.pitch];
-
-
@memset(prev_line, 0);
-
@memcpy(prev_line, curr_line);
-
}
-
// finally, delete the last line (s)
-
// const last_line = self.fb.address[(num_lines - amount) * h * self.fb.pitch .. (num_lines) * h * self.fb.pitch];
-
const last_line = self.fb.address[(num_lines - amount) * h * self.fb.pitch ..][0 .. amount * h * self.fb.pitch];
-
@memset(last_line, 0);
-
}
-
-
fn maxCharsWidth(self: *const Self) u64 {
-
return self.fb.width / (self.font.hdr.width + 0);
-
}
-
-
fn maxCharsHeight(self: *const Self) u64 {
-
return self.fb.height / self.font.hdr.height;
-
}
-
};
+24
components/ukernel/deps/console/flake.lock
···
+
{
+
"nodes": {
+
"nixpkgs": {
+
"locked": {
+
"lastModified": 315532800,
+
"narHash": "sha256-t4zrLJk1EZWk1lUnvNEVjPBVNBHVzS3A0RsxkRSwwSE=",
+
"rev": "6d7ec06d6868ac6d94c371458fc2391ded9ff13d",
+
"type": "tarball",
+
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre861040.6d7ec06d6868/nixexprs.tar.xz?rev=6d7ec06d6868ac6d94c371458fc2391ded9ff13d"
+
},
+
"original": {
+
"type": "tarball",
+
"url": "https://channels.nixos.org/nixpkgs-unstable/nixexprs.tar.xz"
+
}
+
},
+
"root": {
+
"inputs": {
+
"nixpkgs": "nixpkgs"
+
}
+
}
+
},
+
"root": "root",
+
"version": 7
+
}
+23
components/ukernel/deps/console/flake.nix
···
+
{
+
inputs = {
+
nixpkgs.url = "https://channels.nixos.org/nixpkgs-unstable/nixexprs.tar.xz";
+
};
+
outputs =
+
{ nixpkgs, ... }@inputs:
+
let
+
inherit (inputs.nixpkgs) lib;
+
forAllSystems =
+
body: lib.genAttrs lib.systems.flakeExposed (system: body nixpkgs.legacyPackages.${system});
+
in
+
{
+
devShells = forAllSystems (pkgs: {
+
default = pkgs.mkShell {
+
packages = with pkgs; [
+
zig_0_15
+
];
+
};
+
});
+
+
formatter = forAllSystems (pkgs: pkgs.nixfmt-rfc-style);
+
};
+
}
-24
components/ukernel/deps/console/fonts/LICENSE.spleen
···
-
Copyright (c) 2018-2024, Frederic Cambus
-
All rights reserved.
-
-
Redistribution and use in source and binary forms, with or without
-
modification, are permitted provided that the following conditions are met:
-
-
* Redistributions of source code must retain the above copyright
-
notice, this list of conditions and the following disclaimer.
-
-
* Redistributions in binary form must reproduce the above copyright
-
notice, this list of conditions and the following disclaimer in the
-
documentation and/or other materials provided with the distribution.
-
-
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
-
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-
POSSIBILITY OF SUCH DAMAGE.
components/ukernel/deps/console/fonts/bold16x32.psf

This is a binary file and will not be displayed.

components/ukernel/deps/console/fonts/bold8x16.psf

This is a binary file and will not be displayed.

components/ukernel/deps/console/fonts/spleen-12x24.psf

This is a binary file and will not be displayed.

components/ukernel/deps/console/fonts/spleen-16x32.psf

This is a binary file and will not be displayed.

components/ukernel/deps/console/fonts/spleen-32x64.psf

This is a binary file and will not be displayed.

components/ukernel/deps/console/fonts/spleen-5x8.psf

This is a binary file and will not be displayed.

components/ukernel/deps/console/fonts/spleen-6x12.psf

This is a binary file and will not be displayed.

components/ukernel/deps/console/fonts/spleen-8x16.psf

This is a binary file and will not be displayed.

-53
components/ukernel/deps/console/psf2.zig
···
-
const std = @import("std");
-
-
pub const Font = struct {
-
const Self = @This();
-
pub const PsfHeader = extern struct {
-
magic: u32 = 0x864ab572,
-
version: u32,
-
header_size: u32,
-
flags: u32,
-
numglyph: u32,
-
bytes_per_glyph: u32,
-
height: u32,
-
width: u32,
-
-
pub fn bytesPerLine(self: *const PsfHeader) u32 {
-
return (self.width + 7) / 8;
-
}
-
};
-
-
fontdata: []const u8,
-
hdr: PsfHeader,
-
-
pub fn new(fontdata: []const u8) !Self {
-
var ret: Self = undefined;
-
ret.fontdata = fontdata;
-
-
// fill the header properly
-
const hdr_size = @sizeOf(PsfHeader);
-
if (fontdata.len < hdr_size) return error.TooSmall;
-
const hdr_ptr: [*]u8 = @ptrCast(&ret.hdr);
-
@memcpy(hdr_ptr[0..hdr_size], fontdata[0..hdr_size]);
-
-
return ret;
-
}
-
-
pub fn getGlyph(self: *const Self, ch: u8) ![]const u8 {
-
const startpos: u64 = self.hdr.header_size + ch * self.hdr.bytes_per_glyph;
-
const endpos: u64 = startpos + self.hdr.bytes_per_glyph;
-
-
if (self.fontdata.len < endpos) return error.InvalidCharacter;
-
return self.fontdata[startpos..endpos];
-
}
-
};
-
pub fn readIntTo32(buffer: []const u8) u32 {
-
const readInt = std.mem.readInt;
-
return switch (buffer.len) {
-
0 => 0,
-
1 => @intCast(readInt(u8, buffer[0..1], .big)),
-
2 => @intCast(readInt(u16, buffer[0..2], .big)),
-
3 => @intCast(readInt(u24, buffer[0..3], .big)),
-
else => @intCast(readInt(u32, buffer[0..4], .big)),
-
};
-
}
+24
components/ukernel/deps/console/src/fonts/LICENSE.spleen
···
+
Copyright (c) 2018-2024, Frederic Cambus
+
All rights reserved.
+
+
Redistribution and use in source and binary forms, with or without
+
modification, are permitted provided that the following conditions are met:
+
+
* Redistributions of source code must retain the above copyright
+
notice, this list of conditions and the following disclaimer.
+
+
* Redistributions in binary form must reproduce the above copyright
+
notice, this list of conditions and the following disclaimer in the
+
documentation and/or other materials provided with the distribution.
+
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+
POSSIBILITY OF SUCH DAMAGE.
components/ukernel/deps/console/src/fonts/spleen-12x24.psf

This is a binary file and will not be displayed.

+39
components/ukernel/deps/console/src/psf2.zig
···
+
const std = @import("std");

/// On-disk PSF2 font header.
pub const Psf2Header = extern struct {
    magic: u32 = 0x864ab572,
    version: u32,
    header_size: u32,
    flags: u32,
    numglyph: u32,
    bytes_per_glyph: u32,
    height: u32,
    width: u32,

    /// Bytes used to store one horizontal row of a glyph (width rounded up
    /// to whole bytes).
    pub fn bytesPerLine(self: *const Psf2Header) u32 {
        return (self.width + 7) / 8;
    }
};

/// Thin, non-owning view over raw PSF2 font bytes.
pub const Font = struct {
    const Self = @This();

    // Backing bytes. Must begin with a Psf2Header; the data pointer must be
    // u32-aligned for getHdr's @alignCast to hold.
    fontdata: []const u8 align(4),

    pub fn new(fontdata: []const u8) Self {
        return .{ .fontdata = fontdata };
    }

    /// View the leading bytes as the PSF2 header.
    pub fn getHdr(self: *const Self) *const Psf2Header {
        return @ptrCast(@alignCast(self.fontdata));
    }

    /// Bitmap slice for glyph `ch`, or error.InvalidCharacter when it lies
    /// outside the font data.
    pub fn getGlyph(self: *const Self, ch: u8) ![]const u8 {
        const hdr = self.getHdr();
        const glyph_size: u64 = hdr.bytes_per_glyph;
        const start: u64 = @as(u64, hdr.header_size) + @as(u64, ch) * glyph_size;

        if (self.fontdata.len < start + glyph_size) return error.InvalidCharacter;

        return self.fontdata[start..][0..glyph_size];
    }
};
+290
components/ukernel/deps/console/src/root.zig
···
+
const std = @import("std");
const builtin = @import("builtin");
pub const psf2 = @import("psf2.zig");
// Host endianness; pixel values are byte-swapped on big-endian targets.
const are_we_le = builtin.cpu.arch.endian() == .little;

const fontdata_embed = @embedFile("fonts/spleen-12x24.psf");
// Copy the embedded bytes into a u32-aligned array so psf2.Font.getHdr's
// @alignCast is guaranteed to hold.
const fontdata: [fontdata_embed.len]u8 align(@alignOf(u32)) = fontdata_embed.*;
+
+
/// Basic framebuffer container
pub const Framebuffer = struct {
    const Self = @This();
    // Base of the linear framebuffer, viewed as 32-bit pixels.
    address: [*]u32,
    // Geometry in pixels.
    width: u64,
    height: u64,
    // Bytes per scanline — may exceed width * bypp (padding).
    pitch: u64,
    // Bytes per pixel.
    bypp: u16,
    // Channel layout: *_mask_size is bits per channel, *_mask_shift is the
    // bit offset of the channel within a pixel.
    red_mask_size: u8,
    red_mask_shift: u8,
    green_mask_size: u8,
    green_mask_shift: u8,
    blue_mask_size: u8,
    blue_mask_shift: u8,
};
+
+
/// Framebuffer based console
pub const Console = struct {
    fb: Framebuffer,
    // Off-screen backing store; glyphs render here and only damaged lines
    // are copied out to the framebuffer.
    canvas: [*]u8,
    font: psf2.Font,
    // Cursor position, in character cells.
    x_pos: usize = 0,
    y_pos: usize = 0,
    // Screen size in character cells.
    x_chrs_max: usize,
    y_chrs_max: usize,
    // Colors already converted to the framebuffer's pixel format.
    fg_color: u32 = 0xFFFFFFFF,
    bg_color: u32 = 0,

    /// Create an instance given a framebuffer, using the built-in font.
    /// Canvas must be exactly fb.width * fb.height * fb.bypp bytes
    pub fn init(fb: Framebuffer, canvas: [*]u8) Console {
        const font = psf2.Font.new(&fontdata);
        return init_with_font(fb, canvas, font);
    }

    /// Create an instance given a framebuffer and font
    pub fn init_with_font(fb: Framebuffer, canvas: [*]u8, font: psf2.Font) Console {
        const font_hdr = font.getHdr();
        return .{
            .fb = fb,
            .font = font,
            .canvas = canvas,
            // TODO: implement spacing between chars?
            .x_chrs_max = fb.width / font_hdr.width,
            .y_chrs_max = fb.height / font_hdr.height,
        };
    }
+
+
    /// Write a string to the console; returns the number of bytes consumed.
    /// Only the damaged character lines are flushed to the framebuffer.
    pub fn puts(self: *Console, msg: []const u8) usize {
        var written: usize = 0;

        // Damage tracking: if any putc scrolled, every line moved, so the
        // whole screen is damaged; otherwise only [start_line, y_pos].
        const start_line, const num_lines = blk: {
            const start_line = self.y_pos;
            var scrolled: bool = false;
            for (msg) |ch| {
                // TODO: handle characters that failed to print
                // (they still count toward `written` here)
                scrolled |= self.putc(ch) catch false;
                written += 1;
            }
            if (scrolled) break :blk .{ 0, self.y_chrs_max };
            break :blk .{ start_line, self.y_pos - start_line + 1 };
        };
        self.renderCanvas(start_line, num_lines);
        return written;
    }
+
+
    // Copy the given character lines from the canvas to the framebuffer.
    // start_line/num_lines are in character cells, not pixels.
    fn renderCanvas(
        self: *Console,
        start_line: usize,
        num_lines: usize,
    ) void {
        const glyph_height: usize = @intCast(self.font.getHdr().height);
        // The canvas is tightly packed — not necessarily fb pitch!
        const canvas_pitch = self.fb.width * self.fb.bypp;
        const byte_fb: [*]u8 = @ptrCast(self.fb.address);

        if (canvas_pitch == self.fb.pitch) {
            // Pitches match: flush the damaged region in one bulk copy.
            const src_buf = self.canvas[canvas_pitch * glyph_height * start_line ..][0 .. canvas_pitch * glyph_height * num_lines];
            const dst_buf = byte_fb[self.fb.pitch * glyph_height * start_line ..][0 .. self.fb.pitch * glyph_height * num_lines];
            @memcpy(dst_buf, src_buf);
        } else {
            // Unfortunately we have to copy line by line
            var i: usize = 0;
            const canvas_start = canvas_pitch * start_line * glyph_height;
            const fb_start = self.fb.pitch * start_line * glyph_height;
            while (i < num_lines * glyph_height) : (i += 1) {
                const src_line = self.canvas[canvas_start + i * canvas_pitch ..][0..canvas_pitch];
                const dst_line = byte_fb[fb_start + i * self.fb.pitch ..][0..canvas_pitch];
                @memcpy(dst_line, src_line);
            }
        }
    }
+
+
/// Write a character to the console, return true if scrolled
+
/// If putchar failed we did not scroll for sure
+
fn putc(self: *Console, ch: u8) !bool {
+
var scrolled: bool = false;
+
// Handle newlines
+
if (ch == '\r') return scrolled;
+
if (ch == '\n') {
+
// Reset to the beginning of the next line
+
self.x_pos = 0;
+
self.y_pos += 1;
+
// If we've overrun, scroll the entire framebuffer up one
+
// and then reset to the last line
+
if (self.y_pos >= self.y_chrs_max) {
+
self.scrollUp();
+
scrolled = true;
+
}
+
return scrolled;
+
}
+
// TODO: color palette and escape codes?
+
try self.putchar(ch, self.x_pos, self.y_pos, self.fg_color, self.bg_color);
+
self.x_pos += 1;
+
+
// If our x is too far, go down a line
+
if (self.x_pos < self.x_chrs_max) return scrolled;
+
self.x_pos = 0;
+
self.y_pos += 1;
+
if (self.y_pos >= self.y_chrs_max) {
+
self.scrollUp();
+
scrolled = true;
+
}
+
return scrolled;
+
}
+
+
    // Render one glyph into the canvas at character cell (x_pos, y_pos),
    // using pre-converted pixel values fg_val/bg_val (see convertColor).
    fn putchar(self: *const Console, ch: u8, x_pos: usize, y_pos: usize, fg_val: u32, bg_val: u32) !void {
        // Index 0 = background, 1 = foreground. Byte-swapped on big-endian
        // hosts so the @bitCast below yields framebuffer byte order.
        const raw_color_choice: [2]u32 = [2]u32{
            if (are_we_le) bg_val else @byteSwap(bg_val),
            if (are_we_le) fg_val else @byteSwap(fg_val),
        };

        const font = self.font;
        const hdr = font.getHdr();

        const bytes_per_line = hdr.bytesPerLine();
        // The highest bit of a glyph row is the leftmost pixel.
        // NOTE(review): u5 shift limits glyph rows to 32 bits — confirm no
        // font wider than 32 px is ever used.
        const mask_shamt: u5 = @truncate(bytes_per_line * 8 - 1);
        const mask_initial: u32 = @as(u32, 1) << mask_shamt;
        const glyph = try font.getGlyph(ch);

        // Offset into the canvas of the beginning of the character
        const canvas_pitch: usize = self.fb.width * self.fb.bypp;
        var offset: usize = (y_pos * @as(usize, hdr.height) * canvas_pitch) + (x_pos * @as(usize, hdr.width) * self.fb.bypp);
        // run for every line
        var glyph_y: u32 = 0;
        var mask: u32 = 0;
        while (glyph_y < hdr.height) : (glyph_y += 1) {
            // initialize the mask and current line
            mask = mask_initial;
            // TODO: endian
            const line_value: u32 = std.mem.readVarInt(u32, glyph[glyph_y * bytes_per_line ..][0..bytes_per_line], .big);
            // offset into the canvas of the current line
            var line_offset: usize = offset;
            var glyph_x: u32 = 0;
            while (glyph_x < hdr.width) : (glyph_x += 1) {
                // Write the fg or bg color for this pixel
                const color: [4]u8 = @bitCast(raw_color_choice[@intFromBool(line_value & mask != 0)]);
                @memcpy(self.canvas[line_offset..][0..self.fb.bypp], color[0..]);
                // Move right a pixel
                line_offset += self.fb.bypp;
                mask >>= 1;
            }
            // Move down a line
            offset += canvas_pitch;
        }
    }
+
+
    // Set the fg and bg color. Inputs are 0xRRGGBB; they are converted to
    // the framebuffer's native pixel format before being stored.
    pub fn setColor(self: *Console, new_fg: u32, new_bg: u32) void {
        self.fg_color = self.convertColor(new_fg);
        self.bg_color = self.convertColor(new_bg);
    }
+
+
// Convert a normal _RGB u32 to the actual framebuffer format
+
fn convertColor(self: *const Console, color: u32) u32 {
+
// The color value also needs to be scaled. For example,
+
// if it's 10 bits per color and we're starting from 8 bits,
+
// Full bright will only be 0xFF/0x3FF or about 25% brightness.
+
// To fix this hypothetical, shift left by (10 - 8). This isn't
+
// perfectly accurate but close enough.
+
const red_left_shift: u5 = @truncate(self.fb.red_mask_size);
+
const green_left_shift: u5 = @truncate(self.fb.red_mask_size);
+
const blue_left_shift: u5 = @truncate(self.fb.red_mask_size);
+
+
// Get our source RGB 888
+
const right_shift = 8;
+
const red_src: u32 = (color >> 16) & 0xFF;
+
const green_src: u32 = (color >> 8) & 0xFF;
+
const blue_src: u32 = color & 0xFF;
+
+
// These shifts are the offsets to place each color.
+
const red_dest_shift: u5 = @truncate(self.fb.red_mask_shift);
+
const green_dest_shift: u5 = @truncate(self.fb.green_mask_shift);
+
const blue_dest_shift: u5 = @truncate(self.fb.blue_mask_shift);
+
+
// Do the calculations
+
const red_dst = switch (std.math.order(red_left_shift, right_shift)) {
+
.gt => red_src << (red_left_shift - right_shift),
+
.lt => red_src >> (right_shift - red_left_shift),
+
.eq => red_src,
+
} << red_dest_shift;
+
+
const green_dst = switch (std.math.order(green_left_shift, right_shift)) {
+
.gt => green_src << (green_left_shift - right_shift),
+
.lt => green_src >> (right_shift - green_left_shift),
+
.eq => green_src,
+
} << green_dest_shift;
+
+
const blue_dst = switch (std.math.order(blue_left_shift, right_shift)) {
+
.gt => blue_src << (blue_left_shift - right_shift),
+
.lt => blue_src >> (right_shift - blue_left_shift),
+
.eq => blue_src,
+
} << blue_dest_shift;
+
+
return red_dst | green_dst | blue_dst;
+
}
+
+
    // Scroll the canvas up by one character line and blank the last line.
    // Make sure to set damage to the entire screen afterwards!
    fn scrollUp(self: *Console) void {
        const glyph_height: usize = @intCast(self.font.getHdr().height);
        const canvas_pitch: usize = self.fb.width * self.fb.bypp;

        // Copy 1:n line up to 0:n-1 with memmove (the regions overlap)
        const dst_buf = self.canvas[0 .. canvas_pitch * glyph_height * (self.y_chrs_max - 1)];
        const src_buf = self.canvas[canvas_pitch * glyph_height ..][0 .. canvas_pitch * glyph_height * (self.y_chrs_max - 1)];
        @memmove(dst_buf, src_buf);

        // Clear last line
        const last_line = self.canvas[canvas_pitch * glyph_height * (self.y_chrs_max - 1) ..][0 .. canvas_pitch * glyph_height];
        @memset(last_line, 0);
        // Cursor moves to the start of the (now blank) last line.
        self.x_pos = 0;
        self.y_pos = self.y_chrs_max - 1;
    }
+
+
    // Get a writer with optional buffering (pass an empty slice for none)
    pub fn writer(self: *Console, buffer: []u8) Writer {
        return Writer.init(self, buffer);
    }
+
+
// Writer with the new std.Io.Writer interface
+
pub const Writer = struct {
+
console: *Console,
+
interface: std.Io.Writer,
+
+
pub fn init(console: *Console, buffer: []u8) Writer {
+
return .{
+
.console = console,
+
.interface = .{
+
.buffer = buffer,
+
.vtable = &.{ .drain = Writer.drain },
+
},
+
};
+
}
+
+
pub fn drain(w: *std.Io.Writer, data: []const []const u8, splat: usize) !usize {
+
// fieldParentPtr is so cool
+
const self: *Writer = @fieldParentPtr("interface", w);
+
var written: usize = 0;
+
// First, consume the buffer
+
if (w.end < w.buffer.len) {
+
const n = self.console.puts(w.buffer[w.end..]);
+
written += n;
+
w.end += n;
+
}
+
+
// Iterate over all the provided slices
+
for (data, 0..) |slice, i| {
+
// If we are the last slice, splat by the amount
+
if (i == data.len - 1) {
+
for (0..splat) |_| {
+
written += self.console.puts(slice);
+
}
+
break;
+
}
+
written += self.console.puts(slice);
+
}
+
return written;
+
}
+
};
+
};