Microkernel thing OS experiment (Zig ⚡)

Compare changes

Choose any two refs to compare.

+2 -1
assets/limine.conf
···
//AMD64 Kernel
protocol: limine
path: boot():/kernel-amd64.elf
-
module_path: boot():/init-amd64.elf
//aarch64 Kernel
protocol: limine
path: boot():/kernel-aarch64.elf
···
//AMD64 Kernel
protocol: limine
path: boot():/kernel-amd64.elf
+
module_path: boot():/root-69.elf
+
module_path: boot():/root-420.elf
//aarch64 Kernel
protocol: limine
path: boot():/kernel-aarch64.elf
+12 -5
build.zig
···
const ukernel_inst = b.addInstallFile(ukernel_artifact.getEmittedBin(), arch.kernelExeName());
b.getInstallStep().dependOn(&ukernel_inst.step);
-
const root_dep = b.dependency("root_server", .{
.arch = arch,
-
});
-
const root_artifact = root_dep.artifact("root_server");
-
const root_inst = b.addInstallFile(root_artifact.getEmittedBin(), arch.rootTaskName());
-
b.getInstallStep().dependOn(&root_inst.step);
// Run in QEMU
run_blk: {
···
const ukernel_inst = b.addInstallFile(ukernel_artifact.getEmittedBin(), arch.kernelExeName());
b.getInstallStep().dependOn(&ukernel_inst.step);
+
const root_69 = b.dependency("root_server", .{
+
.arch = arch,
+
.number = 0x69,
+
}).artifact("root_server");
+
const root_69_inst = b.addInstallFile(root_69.getEmittedBin(), "root-69.elf");
+
b.getInstallStep().dependOn(&root_69_inst.step);
+
+
const root_420 = b.dependency("root_server", .{
.arch = arch,
+
.number = 0x420,
+
}).artifact("root_server");
+
const root_420_inst = b.addInstallFile(root_420.getEmittedBin(), "root-420.elf");
+
b.getInstallStep().dependOn(&root_420_inst.step);
// Run in QEMU
run_blk: {
+3 -4
build.zig.zon
···
.path = "components/build_helpers",
},
.limine_binary = .{
-
.url = "git+https://codeberg.org/Limine/Limine?ref=v9.x-binary#acf1e35c4685dba7ef271013db375a727c340ff7",
-
.hash = "N-V-__8AAOkzSACT_9p6kmSSly1l008erzXuG39Z6r54B_y0",
-
// Codeberg is always down so better to leave it not lazy
-
// .lazy = true,
},
.edk2_binary = .{
.url = "git+https://github.com/retrage/edk2-nightly#23068f498687bf64f2b8f80fbcf11e82d987fd9b",
···
.path = "components/build_helpers",
},
.limine_binary = .{
+
.url = "git+https://codeberg.org/Limine/Limine?ref=v10.x-binary#648e33afd153bdbf780ba123e45997428796395d",
+
.hash = "N-V-__8AAJ8bSADxAGaebgaAbkAR2kqOBy52rXAL0oCumn0t",
+
.lazy = true,
},
.edk2_binary = .{
.url = "git+https://github.com/retrage/edk2-nightly#23068f498687bf64f2b8f80fbcf11e82d987fd9b",
+2
components/root_server/build.zig
···
pub fn build(b: *std.Build) void {
const arch = b.option(build_helpers.Architecture, "arch", "The target root_server architecture") orelse .amd64;
// set CPU features based on the architecture
const target = b.resolveTargetQuery(.{
···
const config = b.addOptions();
config.addOption(build_helpers.Architecture, "arch", arch);
const build_helpers_dep = b.dependency("build_helpers", .{});
···
pub fn build(b: *std.Build) void {
const arch = b.option(build_helpers.Architecture, "arch", "The target root_server architecture") orelse .amd64;
+
const number = b.option(usize, "number", "The syscall number to use") orelse 0x69;
// set CPU features based on the architecture
const target = b.resolveTargetQuery(.{
···
const config = b.addOptions();
config.addOption(build_helpers.Architecture, "arch", arch);
+
config.addOption(usize, "number", number);
const build_helpers_dep = b.dependency("build_helpers", .{});
+8 -4
components/root_server/src/main.zig
···
const std = @import("std");
const os = @import("os.zig");
export fn _start() callconv(.c) noreturn {
-
// _ = os.syscall1(SYS_poke, 0xB16B00B5BADBABE);
-
// _ = os.syscall1(SYS_exit, 0x69696969);
asm volatile ("int3");
asm volatile (
-
\\ mov $0x69696969, %%rdi
\\ xor %%rsi, %%rsi
\\ xor %%rbx, %%rbx
\\ mainloop:
\\ xor %%rax, %%rax
\\ delayloop:
\\ inc %%rax
-
\\ cmp $0x4000000, %%rax
\\ jnz delayloop
\\ inc %%rbx
\\ syscall
\\ jmp mainloop
);
die();
···
const std = @import("std");
const os = @import("os.zig");
+
const config = @import("config");
export fn _start() callconv(.c) noreturn {
+
_ = os.syscall1(SYS_poke, 0xB16B00B5BADBABE);
+
_ = os.syscall1(SYS_exit, 0x69696969);
asm volatile ("int3");
asm volatile (
+
\\ mov %[number], %%rdi
\\ xor %%rsi, %%rsi
\\ xor %%rbx, %%rbx
\\ mainloop:
\\ xor %%rax, %%rax
\\ delayloop:
\\ inc %%rax
+
\\ cmp $0x1000000, %%rax
\\ jnz delayloop
\\ inc %%rbx
+
\\ mov %%rsp, %%rsi
\\ syscall
\\ jmp mainloop
+
:
+
: [number] "r" (config.number),
);
die();
+1 -1
components/ukernel/arch/aarch64/boot.zig
···
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
const mod_size = mod.size;
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
-
common.init_data.root_task = mod_addr[0..mod_size];
}
} else {
@branchHint(.unlikely);
···
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
const mod_size = mod.size;
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
+
common.init_data.root_task_elf = mod_addr[0..mod_size];
}
} else {
@branchHint(.unlikely);
+58 -190
components/ukernel/arch/amd64/boot.zig
···
const arch = @import("root.zig");
const common = @import("common");
const console = @import("console");
-
const flanterm = @import("flanterm");
const log = std.log.scoped(.amd64_init);
const StandardGdt = arch.structures.gdt.StandardGdt;
const Tss = arch.structures.tss.Tss;
-
-
var pg_ctx: arch.mm.paging.Context = undefined;
pub const limine_requests = struct {
export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{};
···
pub export var mp: limine.SmpMpFeature.MpRequest linksection(".limine_reqs") = .{ .flags = .{ .x2apic = true } };
};
-
pub fn bsp_init() callconv(.c) noreturn {
// Don't optimize away the limine requests
inline for (@typeInfo(limine_requests).@"struct".decls) |decl| {
std.mem.doNotOptimizeAway(&@field(limine_requests, decl.name));
···
arch.instructions.die();
}
// Die if we don't have a memory map or Higher Half Direct Mapping
if (limine_requests.memmap.response == null) {
@branchHint(.cold);
···
const hhdm_offset = limine_requests.hhdm.response.?.offset;
common.init_data.hhdm_slide = hhdm_offset;
-
// Add in a framebuffer if found
-
initConsole();
-
// Get basic information through CPUID
arch.instructions.cpuid.init();
-
// Add in ACPI/dtb if found, prefer ACPI
-
initHwDesc();
-
-
// Set up the temporary Physical Memory Allocator
-
common.mm.bootmem.init();
-
-
// Attach the root task
-
if (limine_requests.modules.response) |module_response| {
-
if (module_response.module_count > 0) {
-
const mod = module_response.modules.?[0];
-
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
-
const mod_size = mod.size;
-
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
-
common.init_data.root_task = mod_addr[0..mod_size];
-
}
-
} else {
-
@branchHint(.unlikely);
-
@panic("No root task found!");
-
}
-
// Initialize per-cpu data (GDT and TSS)
arch.per_cpu_init_data.init(limine_requests.mp.response.?.cpu_count);
-
-
// Install the IDT
-
arch.interrupts.idt.init();
// Set up our own GDT and TSS
const gdt = &arch.per_cpu_init_data.gdt_buf[0];
gdt.* = .{};
const tss = &arch.per_cpu_init_data.tss_buf[0];
-
// TSS rsp 0x3800
tss.* = .{
-
.rsp0 = 0x7ffe_0000_8000,
-
.rsp1 = 0x7ffe_0000_8000,
-
.rsp2 = 0x7ffe_0000_8000,
};
gdt.tss_desc.set_tss_addr(tss);
gdt.load();
-
log.info("BSP successfully setup GDT+TSS!", .{});
-
-
// AP bootstrap
-
bootstrapAPs();
-
-
// Calibrate our TSC
-
arch.tsc.calibrate_pit() catch {
-
log.info("Failed to calibrate with PIT!", .{});
-
arch.instructions.die();
-
};
-
log.info("TSC estimate: {} MHz", .{arch.tsc.tsc_khz / 1000});
-
-
log.info("Setting up scheduling...", .{});
-
pg_ctx = arch.mm.paging.Context.get_current();
-
-
initApic() catch |err| {
-
log.err("Failed to set up APIC! {}", .{err});
-
@panic("apic");
-
};
-
-
log.info("Allocating code for userspace...", .{});
-
-
// Allocate a stack (0x3000 - 0x4000)
-
common.mm.paging.map(.{
-
.vaddr = 0x7ffe_0000_0000,
-
.size = 65536,
-
.memory_type = .MemoryWriteBack,
-
.perms = .{
-
.x = false,
-
.u = true,
-
.w = true,
-
},
-
.context = &pg_ctx,
-
}) catch @panic("couldn't map user stack");
-
-
const entry = common.loadRootTask(&pg_ctx) catch |err| {
-
log.err("Couldn't load the root task! {}", .{err});
-
@panic("ggz");
-
};
-
log.info("Dropping to userspace entry 0x{x:0>16}", .{entry});
-
-
init_syscalls();
-
-
arch.interrupts.apic.armTimer(1000);
-
enter_userspace(entry, 0x69, 0x7ffe_0001_0000);
-
}
-
-
// Get ready for system calls (set MSRs)
-
fn init_syscalls() void {
-
// Set up the STAR MSR with the segment descriptors
-
const IA32_STAR = arch.registers.MSR(u64, 0xC0000081);
-
const star_value: u64 = 0 | @as(u64, arch.structures.gdt.StandardGdt.selectors.kernel_code) << 32 | (@as(u64, arch.structures.gdt.StandardGdt.selectors.tss_desc + 8) | 3) << 48;
-
IA32_STAR.write(star_value);
-
-
// Set up the EFER MSR with SCE (System Call Enable)
-
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
-
const efer_val = IA32_EFER.read() | 0b1;
-
IA32_EFER.write(efer_val);
-
-
// Set up LSTAR with the syscall handler and FMASK to clear interrupts
-
const IA32_LSTAR = arch.registers.MSR(u64, 0xC0000082);
-
IA32_LSTAR.write(@intFromPtr(syscall_entry));
-
-
const IA32_FMASK = arch.registers.MSR(u64, 0xC0000084);
-
IA32_FMASK.write(1 << 9);
-
}
-
-
const syscall_entry = @extern(*anyopaque, .{
-
.name = "syscall_entry",
-
});
-
export fn syscall_handler(rdi: usize, rsi: usize) callconv(.c) void {
-
std.log.info("Got a syscall! rdi=0x{x}, rsi=0x{x}", .{ rdi, rsi });
-
}
-
-
fn enter_userspace(entry: u64, arg: u64, stack: u64) noreturn {
-
log.info("usercode64 GDT 0x{x}, userdata64 GDT 0x{x}", .{ arch.structures.gdt.StandardGdt.selectors.user_code, arch.structures.gdt.StandardGdt.selectors.user_data });
-
const cr3 = arch.registers.ControlRegisters.Cr3.read();
-
arch.registers.ControlRegisters.Cr3.write(cr3);
-
asm volatile (
-
\\ push %[userdata64]
-
\\ push %[stack]
-
\\ push $0x202
-
\\ push %[usercode64]
-
\\ push %[entry]
-
\\
-
\\ mov %[userdata64], %%rax
-
\\ mov %%rax, %%es
-
\\ mov %%rax, %%ds
-
\\
-
\\ xor %%rsi, %%rsi
-
\\ xor %%rax, %%rax
-
\\ xor %%rdx, %%rdx
-
\\ xor %%rcx, %%rcx
-
\\ xor %%rbp, %%rbp
-
\\ xor %%rbx, %%rbx
-
\\
-
\\ xor %%r8, %%r8
-
\\ xor %%r9, %%r9
-
\\ xor %%r10, %%r10
-
\\ xor %%r11, %%r11
-
\\ xor %%r12, %%r12
-
\\ xor %%r13, %%r13
-
\\ xor %%r14, %%r14
-
\\ xor %%r15, %%r15
-
\\
-
\\ iretq
-
\\
-
:
-
: [arg] "{rdi}" (arg),
-
[stack] "r" (stack),
-
[entry] "r" (entry),
-
[userdata64] "i" (arch.structures.gdt.StandardGdt.selectors.user_data),
-
[usercode64] "i" (arch.structures.gdt.StandardGdt.selectors.user_code),
-
);
-
unreachable;
-
}
-
-
fn initApic() !void {
-
const has_x2apic = limine_requests.mp.response.?.flags.x2apic;
-
arch.interrupts.apic.singleton = switch (has_x2apic) {
-
true => .x2apic,
-
false => blk: {
-
// Map the APIC first!
-
const apic_base = common.mm.physToHHDM([*]volatile u8, 0xFEE0_0000);
-
try common.mm.paging.mapPhys(.{
-
.vaddr = @intFromPtr(apic_base),
-
.paddr = 0xFEE0_0000,
-
.size = 0x1000,
-
.memory_type = .DeviceUncacheable,
-
.perms = .{
-
.x = false,
-
.u = false,
-
.w = true,
-
},
-
.context = &pg_ctx,
-
});
-
break :blk .{ .xapic = apic_base };
-
},
-
};
-
// Set up the spurious vector and the TPR
-
arch.interrupts.apic.init.initialSetup();
-
-
// Calibrate the APIC timer
-
arch.interrupts.apic.init.calibrateTimer();
-
-
// Enable one-shot interrupts
-
arch.interrupts.apic.init.enableOneshotInterrupt();
-
}
-
-
fn initConsole() void {
if (limine_requests.framebuffer.response) |fb_response| {
if (fb_response.framebuffer_count > 0) {
-
const fb = common.aux.Framebuffer.from_limine(fb_response.getFramebuffers()[0]);
-
common.init_data.framebuffer = fb;
-
-
common.init_data.console = flanterm.Context.init(.{
-
.fb = fb.address,
.width = fb.width,
.height = fb.height,
.pitch = fb.pitch,
···
.green_mask_shift = fb.green_mask_shift,
.blue_mask_size = fb.blue_mask_size,
.blue_mask_shift = fb.blue_mask_shift,
-
});
}
}
}
···
const arch = @import("root.zig");
const common = @import("common");
const console = @import("console");
const log = std.log.scoped(.amd64_init);
const StandardGdt = arch.structures.gdt.StandardGdt;
const Tss = arch.structures.tss.Tss;
pub const limine_requests = struct {
export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{};
···
pub export var mp: limine.SmpMpFeature.MpRequest linksection(".limine_reqs") = .{ .flags = .{ .x2apic = true } };
};
+
pub fn early_init() void {
// Don't optimize away the limine requests
inline for (@typeInfo(limine_requests).@"struct".decls) |decl| {
std.mem.doNotOptimizeAway(&@field(limine_requests, decl.name));
···
arch.instructions.die();
}
+
// If the base revision isn't supported, we can't boot
+
if (!limine_requests.base_revision.isSupported()) {
+
@branchHint(.cold);
+
arch.instructions.die();
+
}
+
// Die if we don't have a memory map or Higher Half Direct Mapping
if (limine_requests.memmap.response == null) {
@branchHint(.cold);
···
const hhdm_offset = limine_requests.hhdm.response.?.offset;
common.init_data.hhdm_slide = hhdm_offset;
+
// Get CPUID info
arch.instructions.cpuid.init();
+
// Set up the kernel paging context
+
common.init_data.kernel_paging_ctx = arch.mm.paging.Context.get_current();
+
}
+
pub fn bsp_init() void {
+
// Set up per-cpu data
arch.per_cpu_init_data.init(limine_requests.mp.response.?.cpu_count);
// Set up our own GDT and TSS
const gdt = &arch.per_cpu_init_data.gdt_buf[0];
gdt.* = .{};
const tss = &arch.per_cpu_init_data.tss_buf[0];
+
// TODO: create a fixed mapping for the pages maybe?
tss.* = .{
+
.rsp0 = common.init_data.hhdm_slide + arch.per_cpu_init_data.getStackPhys(0),
};
gdt.tss_desc.set_tss_addr(tss);
gdt.load();
+
// Add in the framebuffer
if (limine_requests.framebuffer.response) |fb_response| {
if (fb_response.framebuffer_count > 0) {
+
const fb = fb_response.getFramebuffers()[0];
+
common.init_data.framebuffer = .{
+
.address = @ptrCast(@alignCast(fb.address)),
.width = fb.width,
.height = fb.height,
.pitch = fb.pitch,
···
.green_mask_shift = fb.green_mask_shift,
.blue_mask_size = fb.blue_mask_size,
.blue_mask_shift = fb.blue_mask_shift,
+
.bypp = fb.bpp / 8,
+
};
+
}
+
}
+
+
// Add in ACPI/dtb if found, prefer ACPI
+
initHwDesc();
+
+
// Attach the root task
+
if (limine_requests.modules.response) |module_response| {
+
if (module_response.module_count > 0) {
+
const mod = module_response.modules.?[0];
+
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
+
const mod_size = mod.size;
+
log.info("Loading root task with {s} @ {*}", .{ mod.path, mod.address });
+
common.init_data.root_task_elf = mod_addr[0..mod_size];
+
}
+
} else {
+
@branchHint(.unlikely);
+
@panic("No root task found!");
+
}
+
+
bootstrapAPs();
+
}
+
+
pub fn loadTasks() void {
+
const tasks_buf: [*]arch.structures.Task = @ptrFromInt(common.init_data.bootmem.allocMem(std.heap.pageSize()) catch {
+
std.log.err("Couldn't allocate tasks!", .{});
+
@panic("allocPhys");
+
});
+
const tasks_scratch: []arch.structures.Task = tasks_buf[0 .. std.heap.pageSize() / @sizeOf(arch.structures.Task)];
+
+
if (limine_requests.modules.response) |module_response| {
+
if (module_response.module_count > 0) {
+
for (module_response.modules.?[0..module_response.module_count], 0..) |mod, i| {
+
const mod_addr: [*]align(4096) u8 = @ptrCast(mod.address);
+
const mod_size = mod.size;
+
common.loadTask(&tasks_scratch[i], mod_addr[0..mod_size]);
+
}
}
}
}
+32 -2
components/ukernel/arch/amd64/instructions/cpuid.zig
···
const res = cpuid(1, 0);
const feat_ecx: FeaturesEcx = @bitCast(res.ecx);
arch.interrupts.apic.tsc_deadline_available = feat_ecx.tsc_deadline;
}
const FeaturesEcx = packed struct(u32) {
-
_reserved0: u23,
tsc_deadline: bool,
-
_reserved1: u8,
};
pub inline fn cpuid(leaf: u32, sub: u32) DefaultResults {
···
const res = cpuid(1, 0);
const feat_ecx: FeaturesEcx = @bitCast(res.ecx);
arch.interrupts.apic.tsc_deadline_available = feat_ecx.tsc_deadline;
+
arch.interrupts.apic.has_x2apic = feat_ecx.x2apic;
}
const FeaturesEcx = packed struct(u32) {
+
sse3: bool,
+
pclmulqdq: bool,
+
dtes64: bool,
+
monitor: bool,
+
ds_cpl: bool,
+
vmx: bool,
+
smx: bool,
+
est: bool,
+
tm2: bool,
+
ssse3: bool,
+
cnxt_id: bool,
+
sdbg: bool,
+
fma: bool,
+
cx16: bool,
+
xtpr: bool,
+
pdcm: bool,
+
_reserved0: bool,
+
pcid: bool,
+
dca: bool,
+
sse4_1: bool,
+
sse4_2: bool,
+
x2apic: bool,
+
movbe: bool,
+
popcnt: bool,
tsc_deadline: bool,
+
aesni: bool,
+
xsave: bool,
+
osxsave: bool,
+
avx: bool,
+
f16c: bool,
+
rdrand: bool,
+
hypervisor: bool,
};
pub inline fn cpuid(leaf: u32, sub: u32) DefaultResults {
+6 -3
components/ukernel/arch/amd64/instructions/root.zig
···
pub const cpuid = @import("cpuid.zig");
pub inline fn die() noreturn {
-
while (true) {
-
asm volatile ("hlt");
-
}
}
···
pub const cpuid = @import("cpuid.zig");
pub inline fn die() noreturn {
+
asm volatile (
+
\\ mov $0xDEADDEADDEADDEAD, %%rax
+
\\ 1: hlt
+
\\ jmp 1b
+
);
+
unreachable;
}
+60 -12
components/ukernel/arch/amd64/interrupts/apic.zig
···
const std = @import("std");
const arch = @import("../root.zig");
const log = std.log.scoped(.apic);
pub var lapic_timer_khz: usize = 0;
pub var tsc_deadline_available = false;
// tbh every cpu will be either x2apic or not, and if xapic it will
// have the exact same base address anyways so this is fine
···
};
pub const init = struct {
-
// Get the APIC ready (call first)
pub fn initialSetup() void {
singleton.setSpuriousInterruptRegister(.{
.apic_soft_enable = true,
.idt_entry = 0xFF,
···
// .priority_class = 0,
// .priority_sub_class = 0,
// });
-
arch.interrupts.idt.add_handler(.{ .interrupt = 0xFF }, spurious_interrupt_handler, 3, 0);
-
arch.interrupts.idt.add_handler(.{ .interrupt = 48 }, periodic_handler, 3, 0);
}
-
pub fn calibrateTimer() void {
singleton.setDivideConfigurationRegister(.div2);
singleton.setLVTTimerRegister(.{
.idt_entry = 0x69,
···
lapic_timer_khz = norm / 5;
-
log.debug("APIC timer: {} kHz", .{lapic_timer_khz});
}
-
pub fn enableOneshotInterrupt() void {
const mode: LAPIC.LVTTimerRegister.Mode = switch (tsc_deadline_available) {
true => .tsc_deadline,
false => blk: {
···
}
}
-
pub fn spurious_interrupt_handler(_: *arch.interrupts.idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
log.warn("Got a spurious interrupt!", .{});
}
-
pub fn periodic_handler(stack_trace: *arch.interrupts.idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
-
log.warn("Got an APIC timer interrupt, incrementing user's rsi...", .{});
-
stack_trace.regs.rsi += 1;
-
singleton.setRegister(.eoi, 0);
-
armTimer(1000);
}
···
const std = @import("std");
const arch = @import("../root.zig");
+
const idt = arch.interrupts.idt;
const log = std.log.scoped(.apic);
+
const common = @import("common");
pub var lapic_timer_khz: usize = 0;
pub var tsc_deadline_available = false;
+
pub var has_x2apic: bool = false;
// tbh every cpu will be either x2apic or not, and if xapic it will
// have the exact same base address anyways so this is fine
···
};
pub const init = struct {
pub fn initialSetup() void {
+
// First, make the APIC accessible
+
initSingleton() catch |err| {
+
log.err("Failed to map APIC! {}", .{err});
+
@panic("initSingleton");
+
};
+
// Set up the interrupt handlers
singleton.setSpuriousInterruptRegister(.{
.apic_soft_enable = true,
.idt_entry = 0xFF,
···
// .priority_class = 0,
// .priority_sub_class = 0,
// });
+
arch.interrupts.idt.add_handler(.{ .interrupt = 0xFF }, u64, spurious_interrupt_handler, 0, 0);
+
arch.interrupts.idt.add_handler(.{ .interrupt = 48 }, u64, timer_handler, 0, 0);
+
+
// Calibrate against the TSC
+
calibrateTimer();
+
// Set up the LVT Timer Register
+
enableOneshotInterrupt();
+
}
+
+
fn initSingleton() !void {
+
arch.interrupts.apic.singleton = switch (has_x2apic) {
+
true => .x2apic,
+
false => blk: {
+
// Map the APIC first!
+
const apic_base = common.mm.physToHHDM([*]volatile u8, 0xFEE0_0000);
+
try common.mm.paging.mapPhys(.{
+
.vaddr = @intFromPtr(apic_base),
+
.paddr = 0xFEE0_0000,
+
.size = 0x1000,
+
.memory_type = .DeviceUncacheable,
+
.perms = .{
+
.x = false,
+
.u = false,
+
.w = true,
+
},
+
});
+
break :blk .{ .xapic = apic_base };
+
},
+
};
}
+
fn calibrateTimer() void {
singleton.setDivideConfigurationRegister(.div2);
singleton.setLVTTimerRegister(.{
.idt_entry = 0x69,
···
lapic_timer_khz = norm / 5;
+
log.debug("timer: {} kHz", .{lapic_timer_khz});
}
+
fn enableOneshotInterrupt() void {
const mode: LAPIC.LVTTimerRegister.Mode = switch (tsc_deadline_available) {
true => .tsc_deadline,
false => blk: {
···
}
}
+
pub fn spurious_interrupt_handler(_: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
log.warn("Got a spurious interrupt!", .{});
}
+
pub fn timer_handler(stack_trace: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
defer {
+
singleton.setRegister(.eoi, 0);
+
armTimer(20);
+
}
+
// 1. Get the next task. If there is no next task, just keep scheduling.
+
const task = common.scheduler.getNextTask() orelse return;
+
// 2. Swap the next task state with the current interrupt trace
+
std.mem.swap(arch.interrupts.idt.SavedRegisters, &task.regs, &stack_trace.regs);
+
std.mem.swap(u64, &task.rip, &stack_trace.rip);
+
std.mem.swap(u64, &task.rsp, &stack_trace.rsp);
+
// If task has a new cr3, swap current CR3 and task cr3 too
+
if (task.cr3_val != stack_trace.cr3) {
+
arch.registers.ControlRegisters.Cr3.write(task.cr3_val);
+
task.cr3_val = stack_trace.cr3;
+
}
+
// 3. Now, `task` has our current state, so enqueue it.
+
common.scheduler.pushTask(task);
}
+16 -12
components/ukernel/arch/amd64/interrupts/idt.zig
···
// handler, which pushes more information then calls the user
// defined handler. Use common sense and don't return from an exception
// which shouldn't be returned from.
-
const DefinedHandler = *const fn (*InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void;
-
pub export var defined_handlers: [entry_count]DefinedHandler = undefined;
// The actual handlers with addresses in the IDT.
const ActualHandler = *const fn () callconv(.naked) void;
···
const Self = @This();
pub fn parse(self: Self) Target {
return switch (self.interrupt) {
-
true => .{ .interrupt = @enumFromInt(self.idx) },
false => switch (self.type) {
.gdt => .{ .gdt_sel = self.idx },
.ldt => .{ .ldt_sel = self.idx },
···
r13: u64,
r14: u64,
r15: u64,
};
/// The Interrupt frame which we help generate
···
eflags: u64,
rsp: u64,
ss: u16 align(8),
};
}
···
// Set every IDT entry to the corresponding ActualHandler
for (0..entry_count) |i| {
const actual_handler = @intFromPtr(actual_handlers[i]);
-
interrupt_descriptor_table[i] = Entry.init(actual_handler, 3, 0);
}
// Now, set every defined handler to the default one
@memset(&defined_handlers, arch.interrupts.unhandled_interrupt);
// Finally, load the idt
load();
-
-
add_handler(.{ .exception = .breakpoint }, arch.interrupts.breakpoint, 3, 0);
-
add_handler(.{ .exception = .double_fault }, arch.interrupts.double_fault, 3, 0);
-
add_handler(.{ .exception = .general_protection_fault }, arch.interrupts.general_protection_fault, 3, 0);
-
add_handler(.{ .exception = .page_fault }, arch.mm.paging.page_fault_handler, 3, 0);
}
pub fn load() void {
···
idtr.load();
}
-
pub fn add_handler(interrupt: Interrupt, handler: anytype, dpl: u2, ist: u3) void {
// Modify the type, dpl, and ist in place
var tmp = interrupt_descriptor_table[interrupt.interrupt];
tmp.options.dpl = dpl;
tmp.options.ist_index = ist;
interrupt_descriptor_table[interrupt.interrupt] = tmp;
-
// Add the DefinedHandler
-
defined_handlers[interrupt.interrupt] = @ptrCast(&handler);
}
···
// handler, which pushes more information then calls the user
// defined handler. Use common sense and don't return from an exception
// which shouldn't be returned from.
+
pub const CallConv: std.builtin.CallingConvention = .{ .x86_64_sysv = .{} };
+
pub fn InterruptHandler(comptime E: type) type {
+
return *const fn (*InterruptFrame(E)) callconv(CallConv) void;
+
}
+
pub export var defined_handlers: [entry_count]InterruptHandler(u64) = undefined;
// The actual handlers with addresses in the IDT.
const ActualHandler = *const fn () callconv(.naked) void;
···
const Self = @This();
pub fn parse(self: Self) Target {
return switch (self.interrupt) {
+
true => .{ .interrupt = .{ .interrupt = @truncate(self.idx) } },
false => switch (self.type) {
.gdt => .{ .gdt_sel = self.idx },
.ldt => .{ .ldt_sel = self.idx },
···
r13: u64,
r14: u64,
r15: u64,
+
+
pub const default = std.mem.zeroes(SavedRegisters);
};
/// The Interrupt frame which we help generate
···
eflags: u64,
rsp: u64,
ss: u16 align(8),
+
+
pub fn normalize(self: *InterruptFrame(ErrorCode)) *InterruptFrame(u64) {
+
return @ptrCast(self);
+
}
};
}
···
// Set every IDT entry to the corresponding ActualHandler
for (0..entry_count) |i| {
const actual_handler = @intFromPtr(actual_handlers[i]);
+
interrupt_descriptor_table[i] = Entry.init(actual_handler, 0, 0);
}
// Now, set every defined handler to the default one
@memset(&defined_handlers, arch.interrupts.unhandled_interrupt);
// Finally, load the idt
load();
}
pub fn load() void {
···
idtr.load();
}
+
pub fn add_handler(interrupt: Interrupt, comptime E: type, handler: InterruptHandler(E), dpl: u2, ist: u3) void {
// Modify the type, dpl, and ist in place
var tmp = interrupt_descriptor_table[interrupt.interrupt];
tmp.options.dpl = dpl;
tmp.options.ist_index = ist;
interrupt_descriptor_table[interrupt.interrupt] = tmp;
+
// Add the InterruptHandler
+
defined_handlers[interrupt.interrupt] = @ptrCast(handler);
}
+3 -2
components/ukernel/arch/amd64/interrupts/pic.zig
···
/// Remap the 8259 PIC to an interrupt base of 0x32
const arch = @import("../root.zig");
const std = @import("std");
const log = std.log.scoped(.pic);
const out = arch.port.out;
···
wait();
// Set up a spurious IRQ7 handler
-
arch.interrupts.idt.add_handler(.{ .interrupt = 32 + 7 }, spurious_handler, 3, 0);
}
inline fn wait() void {
···
out(u8, PIC_ONE_CMD_PORT, CMD_EOI);
}
-
pub fn spurious_handler(_: *arch.interrupts.idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
std.log.warn("Got a spurious IRQ7 (8259)", .{});
}
···
/// Remap the 8259 PIC to an interrupt base of 0x32
const arch = @import("../root.zig");
+
const idt = arch.interrupts.idt;
const std = @import("std");
const log = std.log.scoped(.pic);
const out = arch.port.out;
···
wait();
// Set up a spurious IRQ7 handler
+
arch.interrupts.idt.add_handler(.{ .interrupt = 32 + 7 }, u64, spurious_handler, 0, 0);
}
inline fn wait() void {
···
out(u8, PIC_ONE_CMD_PORT, CMD_EOI);
}
+
pub fn spurious_handler(_: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
std.log.warn("Got a spurious IRQ7 (8259)", .{});
}
+149 -6
components/ukernel/arch/amd64/interrupts/root.zig
···
pub const pit = @import("pit.zig");
pub const idt = @import("idt.zig");
const std = @import("std");
const arch = @import("../root.zig");
pub inline fn enable() void {
asm volatile ("sti");
···
asm volatile ("cli");
}
-
pub fn unhandled_interrupt(stack_frame: *idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
-
std.log.err("Unhandled interrupt (0x{x})!!! rip = 0x{x}", .{ stack_frame.int_num.interrupt, stack_frame.rip });
arch.instructions.die();
}
-
pub fn breakpoint(stack_frame: *idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
std.log.warn("Breakpoint @ 0x{x}, returning execution...", .{stack_frame.rip});
}
-
pub fn double_fault(stack_frame: *idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
std.log.err("Double fault @ 0x{x}, dying!!!", .{stack_frame.rip});
arch.instructions.die();
}
-
pub fn general_protection_fault(stack_frame: *idt.InterruptFrame(idt.SelectorErrorCode)) callconv(.{ .x86_64_sysv = .{} }) void {
-
std.log.warn("General Protection Fault @ 0x{x} (Error Code {}), returning execution...", .{ stack_frame.rip, stack_frame.error_code });
arch.instructions.die();
}
···
pub const pit = @import("pit.zig");
pub const idt = @import("idt.zig");
const std = @import("std");
+
const log = std.log.scoped(.interrupts);
const arch = @import("../root.zig");
+
const common = @import("common");
pub inline fn enable() void {
asm volatile ("sti");
···
asm volatile ("cli");
}
+
const syscall_entry = @extern(*anyopaque, .{
+
.name = "syscall_entry",
+
});
+
+
export fn syscall_handler(rdi: usize, rsi: usize) callconv(.c) void {
+
std.log.info("Got a syscall! rdi=0x{x}, rsi=0x{x}", .{ rdi, rsi });
+
}
+
+
pub fn init_syscalls() void {
+
// Set up the STAR MSR with the segment descriptors
+
const IA32_STAR = arch.registers.MSR(u64, 0xC0000081);
+
const star_value: u64 = 0 | @as(u64, arch.structures.gdt.StandardGdt.selectors.kernel_code) << 32 | (@as(u64, arch.structures.gdt.StandardGdt.selectors.tss_desc + 8) | 3) << 48;
+
IA32_STAR.write(star_value);
+
+
// Set up the EFER MSR with SCE (System Call Enable)
+
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
+
const efer_val = IA32_EFER.read() | 0b1;
+
IA32_EFER.write(efer_val);
+
+
// Set up LSTAR with the syscall handler and FMASK to clear interrupts
+
const IA32_LSTAR = arch.registers.MSR(u64, 0xC0000082);
+
IA32_LSTAR.write(@intFromPtr(syscall_entry));
+
+
const IA32_FMASK = arch.registers.MSR(u64, 0xC0000084);
+
IA32_FMASK.write(1 << 9);
+
}
+
+
pub fn print_regs(frame: *idt.InterruptFrame(u64)) void {
+
std.log.err("CR3: 0x{x:0>16}", .{frame.cr3});
+
std.log.err("RAX: 0x{x:0>16}, RBX: 0x{x:0>16}, RCX: 0x{x:0>16}, RDX: 0x{x:0>16}", .{ frame.regs.rax, frame.regs.rbx, frame.regs.rcx, frame.regs.rdx });
+
std.log.err("RSI: 0x{x:0>16}, RDI: 0x{x:0>16}, RBP: 0x{x:0>16}, RSP: 0x{x:0>16}", .{ frame.regs.rsi, frame.regs.rdi, frame.regs.rbp, frame.rsp });
+
std.log.err("R8: 0x{x:0>16}, R9: 0x{x:0>16}, R10: 0x{x:0>16}, R11: 0x{x:0>16}", .{ frame.regs.r8, frame.regs.r9, frame.regs.r10, frame.regs.r11 });
+
std.log.err("R12: 0x{x:0>16}, R13: 0x{x:0>16}, R14: 0x{x:0>16}, R15: 0x{x:0>16}", .{ frame.regs.r12, frame.regs.r13, frame.regs.r14, frame.regs.r15 });
+
std.log.err("RFL: 0x{x:0>16}, RIP: 0x{x:0>16}, CS: 0x{x:0>16}, SS: 0x{x:0>16}", .{ frame.eflags, frame.rip, frame.cs, frame.ss });
+
}
+
+
pub fn unhandled_interrupt(frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
+
if (std.enums.tagName(idt.Exception, frame.int_num.exception)) |exception_name| {
+
std.log.err("Unhandled interrupt (0x{x} : {s})!!!", .{ frame.int_num.interrupt, exception_name });
+
} else {
+
std.log.err("Unhandled interrupt (0x{x})!!!", .{frame.int_num.interrupt});
+
}
+
+
print_regs(frame);
+
+
arch.interrupts.disable();
arch.instructions.die();
}
+
pub fn breakpoint(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
std.log.warn("Breakpoint @ 0x{x}, returning execution...", .{stack_frame.rip});
}
+
pub fn double_fault(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
std.log.err("Double fault @ 0x{x}, dying!!!", .{stack_frame.rip});
+
print_regs(stack_frame);
+
arch.interrupts.disable();
arch.instructions.die();
}
+
pub fn general_protection_fault(stack_frame: *idt.InterruptFrame(idt.SelectorErrorCode)) callconv(idt.CallConv) void {
+
arch.interrupts.disable();
+
std.log.warn("General Protection Fault @ 0x{x}", .{stack_frame.rip});
+
+
const target = stack_frame.error_code.parse();
+
switch (target) {
+
.interrupt => |int| {
+
if (std.enums.tagName(idt.Exception, int.exception)) |exc_name| {
+
std.log.warn("Caused by interrupt 0x{x} ({s})", .{ int.interrupt, exc_name });
+
} else {
+
std.log.warn("Caused by interrupt 0x{x}", .{int.interrupt});
+
}
+
},
+
.gdt_sel => |gdt_sel| {
+
std.log.warn("GDT selector: 0x{x}", .{gdt_sel});
+
},
+
.ldt_sel => |ldt_sel| {
+
std.log.warn("LDT selector: 0x{x}", .{ldt_sel});
+
},
+
}
+
print_regs(stack_frame.normalize());
arch.instructions.die();
}
+
+
// Start scheduling
+
pub fn startScheduling() noreturn {
+
// 1. Pop off the task to run
+
const task = common.scheduler.getNextTask() orelse {
+
std.log.scoped(.startScheduling).err("No root task!", .{});
+
@panic("startScheduling");
+
};
+
// 2. Apply the paging context
+
task.getPagingContext().apply();
+
// 3. Give a slice of 1000ms and fire away
+
apic.armTimer(20);
+
enter_userspace(task.rip, 0x69, task.rsp);
+
}
+
+
// Set up the IDT, PIC, TSC, and APIC
+
pub fn init() void {
+
// Set up the IDT and associated vectors
+
idt.init();
+
idt.add_handler(.{ .exception = .breakpoint }, u64, arch.interrupts.breakpoint, 3, 0);
+
idt.add_handler(.{ .exception = .double_fault }, u64, arch.interrupts.double_fault, 0, 0);
+
idt.add_handler(.{ .exception = .general_protection_fault }, idt.SelectorErrorCode, arch.interrupts.general_protection_fault, 0, 0);
+
idt.add_handler(.{ .exception = .page_fault }, u64, arch.mm.paging.page_fault_handler, 0, 0);
+
// Set up the 8254's (we need 8259 timer to calibrate tsc)
+
pic.init();
+
// Calibrate the TSC against the 8259
+
arch.tsc.calibrate_pit() catch |err| {
+
log.err("Failed to calibrate TSC: {}", .{err});
+
};
+
// Set up everything needed to arm the timer
+
apic.init.initialSetup();
+
}
+
+
// TODO: make this slightly less shit
+
pub fn enter_userspace(entry: u64, arg: u64, stack: u64) noreturn {
+
log.info("usercode64 GDT 0x{x}, userdata64 GDT 0x{x}", .{ arch.structures.gdt.StandardGdt.selectors.user_code, arch.structures.gdt.StandardGdt.selectors.user_data });
+
const cr3 = arch.registers.ControlRegisters.Cr3.read();
+
arch.registers.ControlRegisters.Cr3.write(cr3);
+
asm volatile (
+
\\ push %[userdata64]
+
\\ push %[stack]
+
\\ push $0x202
+
\\ push %[usercode64]
+
\\ push %[entry]
+
\\
+
\\ mov %[userdata64], %%rax
+
\\ mov %%rax, %%es
+
\\ mov %%rax, %%ds
+
\\
+
\\ xor %%rsi, %%rsi
+
\\ xor %%rax, %%rax
+
\\ xor %%rdx, %%rdx
+
\\ xor %%rcx, %%rcx
+
\\ xor %%rbp, %%rbp
+
\\ xor %%rbx, %%rbx
+
\\
+
\\ xor %%r8, %%r8
+
\\ xor %%r9, %%r9
+
\\ xor %%r10, %%r10
+
\\ xor %%r11, %%r11
+
\\ xor %%r12, %%r12
+
\\ xor %%r13, %%r13
+
\\ xor %%r14, %%r14
+
\\ xor %%r15, %%r15
+
\\
+
\\ iretq
+
\\
+
:
+
: [arg] "{rdi}" (arg),
+
[stack] "r" (stack),
+
[entry] "r" (entry),
+
[userdata64] "i" (arch.structures.gdt.StandardGdt.selectors.user_data),
+
[usercode64] "i" (arch.structures.gdt.StandardGdt.selectors.user_code),
+
);
+
unreachable;
+
}
+24 -8
components/ukernel/arch/amd64/mm/paging.zig
···
level5: bool,
const Self = @This();
-
pub fn apply(self: *Self) void {
// NX Enable
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
const efer_val = IA32_EFER.read() | (0b1 << 11);
···
};
}
pub fn can_map_at(_: *const Self, level: u3) bool {
return level < 2;
}
···
// We need the parameter because aarch64 has 2 root page tables
pub fn root_table(self: *Self, _: u64) TableHandle {
return .{
-
.paddr = self.cr3_val,
.level = if (self.level5) 5 else 4,
.context = self,
.perms = .{
···
}
pub const page_sizes = [_]usize{
-
0x1000, // 4K
-
0x200000, // 2M
-
0x40000000, // 1G
-
0x8000000000, // 512G
-
0x1000000000000, // 256T
};
const MappingHandle = struct {
···
ptr: usize,
};
-
pub fn page_fault_handler(stack_frame: *idt.InterruptFrame(u64)) callconv(.{ .x86_64_sysv = .{} }) void {
std.log.err("Page Fault @ 0x{x}, dying...", .{stack_frame.rip});
arch.instructions.die();
}
···
level5: bool,
const Self = @This();
+
pub fn apply(self: *const Self) void {
// NX Enable
const IA32_EFER = arch.registers.MSR(u64, 0xC0000080);
const efer_val = IA32_EFER.read() | (0b1 << 11);
···
};
}
+
pub fn make_user() !Context {
+
// Make a new root page table
+
const user_root_paddr = try make_page_table();
+
const user_root = common.mm.physToHHDM(*PageTable, user_root_paddr);
+
// Copy the entire higher half entries
+
const higher_half = common.init_data.kernel_paging_ctx.root_table(0).get_children();
+
@memcpy(user_root.entries[256..], higher_half[256..]);
+
return .{
+
.cr3_val = user_root_paddr,
+
.level5 = common.init_data.kernel_paging_ctx.level5,
+
};
+
}
+
pub fn can_map_at(_: *const Self, level: u3) bool {
return level < 2;
}
···
// We need the parameter because aarch64 has 2 root page tables
pub fn root_table(self: *Self, _: u64) TableHandle {
return .{
+
// Mask out the cr3 value
+
.paddr = self.cr3_val & 0xFFFFFFFF_FFFFF000,
.level = if (self.level5) 5 else 4,
.context = self,
.perms = .{
···
}
pub const page_sizes = [_]usize{
+
0x1000,
+
0x200000,
+
0x40000000,
+
0x8000000000,
+
0x1000000000000,
};
const MappingHandle = struct {
···
ptr: usize,
};
+
pub fn page_fault_handler(stack_frame: *idt.InterruptFrame(u64)) callconv(idt.CallConv) void {
std.log.err("Page Fault @ 0x{x}, dying...", .{stack_frame.rip});
+
arch.interrupts.print_regs(stack_frame.normalize());
+
std.log.err("Error CR2: 0x{x:0>16}, Error Code: 0x{x:0>16}", .{ arch.registers.ControlRegisters.Cr2.read(), stack_frame.error_code });
arch.instructions.die();
}
+25 -18
components/ukernel/arch/amd64/root.zig
···
const common = @import("common");
const std = @import("std");
-
fn pageSize() usize {
-
return 4 << 10;
-
}
-
-
pub const std_options: std.Options = .{
-
.logFn = common.aux.logFn,
-
.page_size_min = 4 << 10,
-
.page_size_max = 4 << 10,
-
.queryPageSize = pageSize,
};
-
pub const panic = std.debug.FullPanic(common.aux.panic);
pub var per_cpu_init_data: PerCpuInitData = .{};
···
gdt_buf: []StandardGdt = undefined,
tss_buf: []Tss = undefined,
const Self = @This();
pub fn init(self: *Self, cpu_count: u64) void {
-
// 1. Allocate space for GDT and TSS data
const gdt_size = @sizeOf(StandardGdt);
const tss_size = @sizeOf(Tss);
const total_required_size = gdt_size * cpu_count + tss_size * cpu_count;
const buf: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(total_required_size) catch |err| {
std.log.err("init PerCpuInitData: GDT/TSS alloc failed: {}", .{err});
-
@panic("rip bozo");
});
-
// 2. Transmute and fill out the structure
const gdt_buf: [*]StandardGdt = @ptrCast(@alignCast(buf[0 .. gdt_size * cpu_count]));
const tss_buf: [*]Tss = @ptrCast(@alignCast(buf[gdt_size * cpu_count ..][0 .. tss_size * cpu_count]));
self.gdt_buf = gdt_buf[0..cpu_count];
self.tss_buf = tss_buf[0..cpu_count];
}
-
};
-
comptime {
-
// Entry point (_start)
-
@export(&boot.bsp_init, .{ .name = "_start", .linkage = .strong });
-
}
···
const common = @import("common");
const std = @import("std");
+
// needed by std options
+
pub const page_size = struct {
+
pub const min = 4 << 10;
+
pub const max = 4 << 10;
+
pub fn get() usize {
+
return 4 << 10;
+
}
};
pub var per_cpu_init_data: PerCpuInitData = .{};
···
gdt_buf: []StandardGdt = undefined,
tss_buf: []Tss = undefined,
+
// Physical ptr
+
stack_buf: usize = undefined,
+
+
const stack_size = std.heap.page_size_max;
const Self = @This();
pub fn init(self: *Self, cpu_count: u64) void {
+
// 1. Allocate stack space for every core
+
self.stack_buf = common.init_data.bootmem.allocPhys(stack_size * cpu_count) catch |err| {
+
std.log.err("init PerCpuInitData: failed to allocate stack! {}", .{err});
+
@panic("stack_buf");
+
};
+
+
// 2. Allocate space for GDT and TSS data
const gdt_size = @sizeOf(StandardGdt);
const tss_size = @sizeOf(Tss);
const total_required_size = gdt_size * cpu_count + tss_size * cpu_count;
const buf: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(total_required_size) catch |err| {
std.log.err("init PerCpuInitData: GDT/TSS alloc failed: {}", .{err});
+
@panic("gdt_tss_buf");
});
+
// 3. Transmute and fill out the structure
const gdt_buf: [*]StandardGdt = @ptrCast(@alignCast(buf[0 .. gdt_size * cpu_count]));
const tss_buf: [*]Tss = @ptrCast(@alignCast(buf[gdt_size * cpu_count ..][0 .. tss_size * cpu_count]));
self.gdt_buf = gdt_buf[0..cpu_count];
self.tss_buf = tss_buf[0..cpu_count];
}
+
// returns a pointer to the TOP of the stack!
+
pub fn getStackPhys(self: *Self, core_num: usize) usize {
+
return self.stack_buf + (core_num + 1) * stack_size;
+
}
+
};
+24
components/ukernel/arch/amd64/structures/root.zig
···
pub const gdt = @import("gdt.zig");
pub const tss = @import("tss.zig");
···
pub const gdt = @import("gdt.zig");
pub const tss = @import("tss.zig");
+
const arch = @import("../root.zig");
+
const common = @import("common");
+
const Queue = @import("Queue");
+
+
// Uses an intrusive queue
+
pub const Task = struct {
+
// Saved Registers
+
regs: arch.interrupts.idt.SavedRegisters align(8),
+
// Address Space context
+
cr3_val: u64,
+
// Instruction Pointer
+
rip: u64,
+
// Stack Pointer
+
rsp: u64,
+
// Next task basically
+
node: Queue.Node = .{},
+
+
pub fn getPagingContext(self: Task) arch.mm.paging.Context {
+
return .{
+
.cr3_val = self.cr3_val,
+
.level5 = common.init_data.kernel_paging_ctx.level5,
+
};
+
}
+
};
+4
components/ukernel/arch/amd64/tsc.zig
···
const arch = @import("root.zig");
const out = arch.port.out;
const in = arch.port.in;
pub var tsc_khz: usize = 0;
···
if (pollcnt < 1000) return error.PitError;
tsc_khz = (end - start) / 50;
}
/// Delay for a set amount of ms using crappy polling
···
const arch = @import("root.zig");
+
const std = @import("std");
const out = arch.port.out;
const in = arch.port.in;
+
const log = std.log.scoped(.tsc);
pub var tsc_khz: usize = 0;
···
if (pollcnt < 1000) return error.PitError;
tsc_khz = (end - start) / 50;
+
+
log.debug("{} MHz", .{tsc_khz / 1000});
}
/// Delay for a set amount of ms using crappy polling
+14 -19
components/ukernel/build.zig
···
const target = b.resolveTargetQuery(target_query);
const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseSafe });
-
const arch_module = b.createModule(.{
-
.root_source_file = b.path(arch_root_path),
.target = target,
.optimize = optimize,
.code_model = code_model,
});
switch (arch) {
.amd64 => {
arch_module.addAssemblyFile(b.path("arch/amd64/asm/traps.S"));
···
else => {},
}
-
const limine_dep = b.dependency("limine", .{
-
.api_revision = 3,
-
});
-
const spinlock_dep = b.dependency("spinlock", .{});
-
const flanterm_dep = b.dependency("flanterm", .{});
-
-
const limine_mod = limine_dep.module("limine");
-
const spinlock_mod = spinlock_dep.module("spinlock");
-
const flanterm_mod = flanterm_dep.module("flanterm");
-
-
const common_mod = b.createModule(.{
-
.root_source_file = b.path("common/root.zig"),
-
});
arch_module.addImport("limine", limine_mod);
-
arch_module.addImport("flanterm", flanterm_mod);
arch_module.addImport("common", common_mod);
common_mod.addImport("arch", arch_module);
common_mod.addImport("spinlock", spinlock_mod);
-
common_mod.addImport("flanterm", flanterm_mod);
-
common_mod.addImport("limine", limine_mod);
const kernel = b.addExecutable(.{
.name = "ukernel",
-
.root_module = arch_module,
// TODO: remove when x86 backend is less broken with removing CPU features
.use_llvm = true,
});
···
const target = b.resolveTargetQuery(target_query);
const optimize = b.standardOptimizeOption(.{ .preferred_optimize_mode = .ReleaseSafe });
+
const common_mod = b.createModule(.{
+
.root_source_file = b.path("common/root.zig"),
.target = target,
.optimize = optimize,
.code_model = code_model,
});
+
const arch_module = b.createModule(.{
+
.root_source_file = b.path(arch_root_path),
+
});
switch (arch) {
.amd64 => {
arch_module.addAssemblyFile(b.path("arch/amd64/asm/traps.S"));
···
else => {},
}
+
const spinlock_mod = b.dependency("spinlock", .{}).module("spinlock");
+
const limine_mod = b.dependency("limine", .{ .api_revision = 3 }).module("limine");
+
const console_mod = b.dependency("console", .{}).module("console");
+
const queue_mod = b.dependency("Queue", .{}).module("Queue");
arch_module.addImport("limine", limine_mod);
+
arch_module.addImport("console", console_mod);
arch_module.addImport("common", common_mod);
+
arch_module.addImport("Queue", queue_mod);
common_mod.addImport("arch", arch_module);
common_mod.addImport("spinlock", spinlock_mod);
+
common_mod.addImport("console", console_mod);
+
common_mod.addImport("Queue", queue_mod);
const kernel = b.addExecutable(.{
.name = "ukernel",
+
.root_module = common_mod,
// TODO: remove when x86 backend is less broken with removing CPU features
.use_llvm = true,
});
+4 -3
components/ukernel/build.zig.zon
···
.limine = .{ .path = "deps/limine-zig" },
.spinlock = .{ .path = "deps/spinlock" },
.build_helpers = .{ .path = "../build_helpers" },
-
.flanterm = .{
-
.url = "git+https://tangled.sh/@sydney.blue/flanterm.zig?ref=trunk#8071c825750c415b9e5502cdff34efc9c6dfeab7",
-
.hash = "flanterm-2.0.0-QnufngHlAQCGtHyca0PrvRvoOldHdJG4DdSz437r9fRr",
},
},
.paths = .{
···
.limine = .{ .path = "deps/limine-zig" },
.spinlock = .{ .path = "deps/spinlock" },
.build_helpers = .{ .path = "../build_helpers" },
+
.console = .{ .path = "deps/console" },
+
.Queue = .{
+
.url = "git+https://tangled.org/@sydney.blue/Queue.zig?ref=dev#6c0760e8a233c1d59554a40a87f0ef293a9697f3",
+
.hash = "Queue-0.0.0-upnEfhEPAADNV4Dvs3DVCRSnOh-BrhgsRR6scaE2qTIa",
},
},
.paths = .{
-108
components/ukernel/common/aux/root.zig
···
-
const console = @import("console");
-
const flanterm = @import("flanterm");
-
const common = @import("../root.zig");
-
const mm = common.mm;
-
const std = @import("std");
-
const arch = @import("arch");
-
const spinlock = @import("spinlock");
-
const limine = @import("limine");
-
-
// Types
-
pub const HardwareDescription = union(enum) {
-
/// Physical address of ACPI RSDP
-
acpi_rsdp: usize,
-
/// Virtual pointer to DTB
-
dtb: *anyopaque,
-
none,
-
};
-
-
pub const InitState = struct {
-
bootmem: mm.bootmem.BootPmm = .{},
-
console: ?flanterm.Context = null,
-
framebuffer: ?common.aux.Framebuffer = null,
-
hardware_description: HardwareDescription = .none,
-
root_task: []align(4096) u8 = undefined,
-
hhdm_slide: usize = 0,
-
};
-
-
pub const Framebuffer = struct {
-
const Self = @This();
-
address: [*]u32,
-
width: u64,
-
height: u64,
-
pitch: u64,
-
bypp: u16,
-
red_mask_size: u8,
-
red_mask_shift: u8,
-
green_mask_size: u8,
-
green_mask_shift: u8,
-
blue_mask_size: u8,
-
blue_mask_shift: u8,
-
-
pub fn from_limine(fb: *const limine.Framebuffer) Self {
-
return .{
-
.address = @ptrCast(@alignCast(fb.address)),
-
.width = fb.width,
-
.height = fb.height,
-
.pitch = fb.pitch,
-
.red_mask_size = fb.red_mask_size,
-
.red_mask_shift = fb.red_mask_shift,
-
.green_mask_size = fb.green_mask_size,
-
.green_mask_shift = fb.green_mask_shift,
-
.blue_mask_size = fb.blue_mask_size,
-
.blue_mask_shift = fb.blue_mask_shift,
-
.bypp = fb.bpp / 8,
-
};
-
}
-
};
-
-
var stdout_lock: spinlock.Spinlock = .{};
-
-
pub fn logFn(
-
comptime message_level: std.log.Level,
-
comptime scope: @TypeOf(.enum_literal),
-
comptime format: []const u8,
-
args: anytype,
-
) void {
-
if (common.init_data.console == null) return;
-
-
// Use the same naming as the default logger
-
const level, const color: flanterm.Colors.Color = switch (message_level) {
-
.debug => .{ "D", .green },
-
.err => .{ "E", .red },
-
.info => .{ "I", .cyan },
-
.warn => .{ "W", .yellow },
-
};
-
// Use same format as default once again
-
const scope_text = switch (scope) {
-
.default => "",
-
else => "<" ++ @tagName(scope) ++ ">",
-
};
-
const prefix = std.fmt.comptimePrint("{s}{s}: ", .{ level, scope_text });
-
-
{
-
const color_default: flanterm.Colors.Color = .default;
-
stdout_lock.lock();
-
defer stdout_lock.unlock();
-
-
var backing_buf = std.mem.zeroes([512]u8);
-
const buf = std.fmt.bufPrint(backing_buf[0..], color.esc_seq() ++ prefix ++ format ++ color_default.esc_seq() ++ "\n", args) catch return;
-
-
common.init_data.console.?.write_slice(buf);
-
// cons.setColor(color, 0);
-
// cons.writer().print(prefix ++ format ++ "\n", args) catch return;
-
}
-
}
-
-
pub fn panic(msg: []const u8, first_trace_addr: ?usize) noreturn {
-
_ = first_trace_addr;
-
const log = std.log.scoped(.panic);
-
log.err("PANIC: {s}", .{msg});
-
var it = std.debug.StackIterator.init(@returnAddress(), @frameAddress());
-
defer it.deinit();
-
while (it.next()) |addr| {
-
if (addr == 0) break;
-
log.err("Addr: 0x{x:0>16}", .{addr});
-
}
-
arch.instructions.die();
-
}
···
+83
components/ukernel/common/aux.zig
···
···
+
const console = @import("console");
+
const common = @import("root.zig");
+
const mm = common.mm;
+
const std = @import("std");
+
const arch = @import("arch");
+
const spinlock = @import("spinlock");
+
+
// Types
+
pub const HardwareDescription = union(enum) {
+
/// Physical address of ACPI RSDP
+
acpi_rsdp: usize,
+
/// Virtual pointer to DTB
+
dtb: *anyopaque,
+
none,
+
};
+
+
pub const InitState = struct {
+
bootmem: mm.bootmem.BootPmm = .{},
+
console: ?console.Console = null,
+
framebuffer: ?console.Framebuffer = null,
+
hardware_description: HardwareDescription = .none,
+
root_task_elf: []align(4096) u8 = undefined,
+
hhdm_slide: usize = 0,
+
kernel_paging_ctx: arch.mm.paging.Context = undefined,
+
};
+
+
pub fn initConsole() void {
+
const fb = common.init_data.framebuffer.?;
+
// Create a canvas for the console to render to
+
const canvas: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(fb.width * fb.height * fb.bypp) catch @panic("Couldn't allocate a canvas"));
+
@memset(canvas[0 .. fb.width * fb.height * fb.bypp], 0);
+
+
// Set the console instance
+
common.init_data.console = console.Console.init(fb, canvas);
+
}
+
+
var stdout_lock: spinlock.Spinlock = .{};
+
+
pub fn logFn(
+
comptime message_level: std.log.Level,
+
comptime scope: @TypeOf(.enum_literal),
+
comptime format: []const u8,
+
args: anytype,
+
) void {
+
if (common.init_data.console == null) return;
+
+
// Use the same naming as the default logger
+
const level, const color: u32 = switch (message_level) {
+
.debug => .{ "D", 0x3bcf1d },
+
.err => .{ "E", 0xff0000 },
+
.info => .{ "I", 0x00bbbb },
+
.warn => .{ "W", 0xfee409 },
+
};
+
// Use same format as default once again
+
const scope_text = switch (scope) {
+
.default => "",
+
else => "<" ++ @tagName(scope) ++ ">",
+
};
+
const prefix = std.fmt.comptimePrint("{s}{s}: ", .{ level, scope_text });
+
+
{
+
stdout_lock.lock();
+
defer stdout_lock.unlock();
+
+
common.init_data.console.?.setColor(color, 0);
+
// No buffering for now
+
var writer = console.Console.Writer.init(&common.init_data.console.?, &.{});
+
writer.interface.print(prefix ++ format ++ "\n", args) catch return;
+
}
+
}
+
+
pub fn panic(msg: []const u8, first_trace_addr: ?usize) noreturn {
+
_ = first_trace_addr;
+
const log = std.log.scoped(.panic);
+
log.err("PANIC: {s}", .{msg});
+
var it = std.debug.StackIterator.init(@returnAddress(), @frameAddress());
+
defer it.deinit();
+
while (it.next()) |addr| {
+
if (addr == 0) break;
+
log.err("Addr: 0x{x:0>16}", .{addr});
+
}
+
arch.instructions.die();
+
}
+4 -5
components/ukernel/common/loader.zig
···
const log = std.log.scoped(.elf_loader);
// Load root task, return the entry point
-
pub fn loadRootTask(context: *arch.mm.paging.Context) !usize {
-
const root_task = common.init_data.root_task;
const hdr = blk: {
-
const hdr: *elf.Elf64_Ehdr = @ptrCast(root_task);
break :blk elf.Header.init(hdr.*, .little);
};
-
var iter = hdr.iterateProgramHeadersBuffer(root_task);
while (try iter.next()) |entry| {
if ((entry.p_type != elf.PT_LOAD) or (entry.p_memsz == 0)) continue;
···
const dst = common.mm.physToHHDM([*]u8, page_backing + vaddr_shift);
const dst_slice = dst[0..entry.p_filesz];
-
const src_slice = root_task[entry.p_offset..][0..entry.p_filesz];
@memcpy(dst_slice, src_slice);
// 3. Add memsz - filesz zeroes
···
const log = std.log.scoped(.elf_loader);
// Load root task, return the entry point
+
pub fn loadElf(context: *arch.mm.paging.Context, task_slice: []align(4096) u8) !usize {
const hdr = blk: {
+
const hdr: *elf.Elf64_Ehdr = @ptrCast(task_slice);
break :blk elf.Header.init(hdr.*, .little);
};
+
var iter = hdr.iterateProgramHeadersBuffer(task_slice);
while (try iter.next()) |entry| {
if ((entry.p_type != elf.PT_LOAD) or (entry.p_memsz == 0)) continue;
···
const dst = common.mm.physToHHDM([*]u8, page_backing + vaddr_shift);
const dst_slice = dst[0..entry.p_filesz];
+
const src_slice = task_slice[entry.p_offset..][0..entry.p_filesz];
@memcpy(dst_slice, src_slice);
// 3. Add memsz - filesz zeroes
-1
components/ukernel/common/mm/bootmem.zig
···
// Finally, initialize the global bootmem
common.init_data.bootmem.initialize(bootmem_struct);
-
common.init_data.bootmem.debugInfo();
}
···
// Finally, initialize the global bootmem
common.init_data.bootmem.initialize(bootmem_struct);
}
+3 -2
components/ukernel/common/mm/paging.zig
···
const arch = @import("arch");
const std = @import("std");
const TableHandle = arch.mm.paging.TableHandle;
const MemoryType = arch.mm.paging.MemoryType;
const Context = arch.mm.paging.Context;
···
size: usize,
perms: Perms,
memory_type: MemoryType,
-
context: *Context,
}) !void {
const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
···
size: usize,
perms: Perms,
memory_type: MemoryType,
-
context: *Context,
}) !void {
const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
···
const arch = @import("arch");
const std = @import("std");
+
const common = @import("../root.zig");
const TableHandle = arch.mm.paging.TableHandle;
const MemoryType = arch.mm.paging.MemoryType;
const Context = arch.mm.paging.Context;
···
size: usize,
perms: Perms,
memory_type: MemoryType,
+
context: *Context = &common.init_data.kernel_paging_ctx,
}) !void {
const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
···
size: usize,
perms: Perms,
memory_type: MemoryType,
+
context: *Context = &common.init_data.kernel_paging_ctx,
}) !void {
const root = args.context.root_table(args.vaddr);
var vaddr = args.vaddr;
+81 -2
components/ukernel/common/root.zig
···
-
pub const aux = @import("aux/root.zig");
pub const mm = @import("mm/root.zig");
-
pub const loadRootTask = loader.loadRootTask;
const loader = @import("loader.zig");
// Arch init must set up appropriate fields!
pub var init_data: aux.InitState = .{};
···
+
pub const aux = @import("aux.zig");
pub const mm = @import("mm/root.zig");
+
pub const scheduler = @import("scheduler.zig");
+
pub const loadElf = loader.loadElf;
+
const arch = @import("arch");
+
const std = @import("std");
const loader = @import("loader.zig");
// Arch init must set up appropriate fields!
pub var init_data: aux.InitState = .{};
+
+
// Generic bsp init
+
pub fn generic_init() callconv(.c) noreturn {
+
const log = std.log.scoped(.generic_init);
+
// First, do early arch init
+
arch.boot.early_init();
+
+
// Now, set up the bootmem and console
+
mm.bootmem.init();
+
+
// Now, do the rest of the arch init
+
arch.boot.bsp_init();
+
+
// Next, set up the console
+
aux.initConsole();
+
+
// Now, set up interrupts
+
arch.interrupts.init();
+
arch.interrupts.init_syscalls();
+
+
log.info("Loading attached tasks...", .{});
+
arch.boot.loadTasks();
+
+
log.info("Dropping to userspace!", .{});
+
+
arch.interrupts.startScheduling();
+
}
+
+
pub fn loadTask(scratch: *arch.structures.Task, task_slice: []align(4096) u8) void {
+
// 1. Create a user address space
+
var user_ctx = arch.mm.paging.Context.make_user() catch |err| {
+
std.log.err("Failed to make user context! {}", .{err});
+
@panic("make_user_ctx");
+
};
+
+
// 2. Allocate a user stack
+
mm.paging.map(.{
+
.vaddr = 0x7ffe_0000_0000,
+
.size = 65536,
+
.memory_type = .MemoryWriteBack,
+
.perms = .{
+
.x = false,
+
.u = true,
+
.w = true,
+
},
+
.context = &user_ctx,
+
}) catch @panic("couldn't map user stack");
+
+
// 3. Map ELF into address space
+
const entry = loadElf(&user_ctx, task_slice) catch |err| {
+
std.log.err("Couldn't load the root task! {}", .{err});
+
@panic("ggz");
+
};
+
// 4. Add task to scheduler
+
scratch.* = .{
+
.cr3_val = user_ctx.cr3_val,
+
.regs = .default,
+
.rip = entry,
+
.rsp = 0x7ffe_0001_0000,
+
};
+
scheduler.pushTask(scratch);
+
}
+
+
// std options etc.
+
pub const panic = std.debug.FullPanic(aux.panic);
+
pub const std_options: std.Options = .{
+
.logFn = aux.logFn,
+
.page_size_min = arch.page_size.min,
+
.page_size_max = arch.page_size.max,
+
.queryPageSize = arch.page_size.get,
+
};
+
+
comptime {
+
// Entry point (_start)
+
@export(&generic_init, .{ .name = "_start", .linkage = .strong });
+
}
+16
components/ukernel/common/scheduler.zig
···
···
+
const std = @import("std");
+
const arch = @import("arch");
+
const Queue = @import("Queue");
+
const Task = arch.structures.Task;
+
+
var task_queue: Queue = .{};
+
+
pub fn pushTask(task: *Task) void {
+
task_queue.enqueue(&task.node);
+
}
+
+
pub fn getNextTask() ?*Task {
+
const node = task_queue.dequeue() orelse return null;
+
const task: *Task = @fieldParentPtr("node", node);
+
return task;
+
}
+6
components/ukernel/deps/console/build.zig
···
···
+
const std = @import("std");
+
pub fn build(b: *std.Build) void {
+
_ = b.addModule("console", .{
+
.root_source_file = b.path("src/root.zig"),
+
});
+
}
+13
components/ukernel/deps/console/build.zig.zon
···
···
+
.{
+
.name = .console,
+
.version = "0.0.0",
+
.fingerprint = 0x3603cfb621692996, // Changing this has security and trust implications.
+
.minimum_zig_version = "0.15.1",
+
.dependencies = .{},
+
.paths = .{
+
"build.zig",
+
"build.zig.zon",
+
"src",
+
"fonts",
+
},
+
}
+24
components/ukernel/deps/console/flake.lock
···
···
+
{
+
"nodes": {
+
"nixpkgs": {
+
"locked": {
+
"lastModified": 315532800,
+
"narHash": "sha256-t4zrLJk1EZWk1lUnvNEVjPBVNBHVzS3A0RsxkRSwwSE=",
+
"rev": "6d7ec06d6868ac6d94c371458fc2391ded9ff13d",
+
"type": "tarball",
+
"url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre861040.6d7ec06d6868/nixexprs.tar.xz?rev=6d7ec06d6868ac6d94c371458fc2391ded9ff13d"
+
},
+
"original": {
+
"type": "tarball",
+
"url": "https://channels.nixos.org/nixpkgs-unstable/nixexprs.tar.xz"
+
}
+
},
+
"root": {
+
"inputs": {
+
"nixpkgs": "nixpkgs"
+
}
+
}
+
},
+
"root": "root",
+
"version": 7
+
}
+23
components/ukernel/deps/console/flake.nix
···
···
+
{
+
inputs = {
+
nixpkgs.url = "https://channels.nixos.org/nixpkgs-unstable/nixexprs.tar.xz";
+
};
+
outputs =
+
{ nixpkgs, ... }@inputs:
+
let
+
inherit (inputs.nixpkgs) lib;
+
forAllSystems =
+
body: lib.genAttrs lib.systems.flakeExposed (system: body nixpkgs.legacyPackages.${system});
+
in
+
{
+
devShells = forAllSystems (pkgs: {
+
default = pkgs.mkShell {
+
packages = with pkgs; [
+
zig_0_15
+
];
+
};
+
});
+
+
formatter = forAllSystems (pkgs: pkgs.nixfmt-rfc-style);
+
};
+
}
+24
components/ukernel/deps/console/src/fonts/LICENSE.spleen
···
···
+
Copyright (c) 2018-2024, Frederic Cambus
+
All rights reserved.
+
+
Redistribution and use in source and binary forms, with or without
+
modification, are permitted provided that the following conditions are met:
+
+
* Redistributions of source code must retain the above copyright
+
notice, this list of conditions and the following disclaimer.
+
+
* Redistributions in binary form must reproduce the above copyright
+
notice, this list of conditions and the following disclaimer in the
+
documentation and/or other materials provided with the distribution.
+
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
+
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+
POSSIBILITY OF SUCH DAMAGE.
components/ukernel/deps/console/src/fonts/spleen-12x24.psf

This is a binary file and will not be displayed.

+39
components/ukernel/deps/console/src/psf2.zig
···
···
+
const std = @import("std");
+
+
pub const Psf2Header = extern struct {
+
magic: u32 = 0x864ab572,
+
version: u32,
+
header_size: u32,
+
flags: u32,
+
numglyph: u32,
+
bytes_per_glyph: u32,
+
height: u32,
+
width: u32,
+
+
pub fn bytesPerLine(self: *const Psf2Header) u32 {
+
return (self.width + 7) / 8;
+
}
+
};
+
+
pub const Font = struct {
+
const Self = @This();
+
fontdata: []const u8 align(4),
+
pub fn new(fontdata: []const u8) Self {
+
return .{
+
.fontdata = fontdata,
+
};
+
}
+
+
pub fn getHdr(self: *const Self) *const Psf2Header {
+
return @ptrCast(@alignCast(self.fontdata));
+
}
+
+
pub fn getGlyph(self: *const Self, ch: u8) ![]const u8 {
+
const hdr = self.getHdr();
+
const startpos: u64 = @as(u64, hdr.header_size) + @as(u64, ch) * @as(u64, hdr.bytes_per_glyph);
+
+
if (self.fontdata.len < startpos + @as(u64, hdr.bytes_per_glyph)) return error.InvalidCharacter;
+
+
return self.fontdata[startpos..][0..hdr.bytes_per_glyph];
+
}
+
};
+290
components/ukernel/deps/console/src/root.zig
···
···
+
const std = @import("std");
+
const builtin = @import("builtin");
+
pub const psf2 = @import("psf2.zig");
+
const are_we_le = builtin.cpu.arch.endian() == .little;
+
+
const fontdata_embed = @embedFile("fonts/spleen-12x24.psf");
+
const fontdata: [fontdata_embed.len]u8 align(@alignOf(u32)) = fontdata_embed.*;
+
+
/// Basic framebuffer container
+
pub const Framebuffer = struct {
+
const Self = @This();
+
address: [*]u32,
+
width: u64,
+
height: u64,
+
pitch: u64,
+
bypp: u16,
+
red_mask_size: u8,
+
red_mask_shift: u8,
+
green_mask_size: u8,
+
green_mask_shift: u8,
+
blue_mask_size: u8,
+
blue_mask_shift: u8,
+
};
+
+
/// Framebuffer based console
+
pub const Console = struct {
+
fb: Framebuffer,
+
canvas: [*]u8,
+
font: psf2.Font,
+
x_pos: usize = 0,
+
y_pos: usize = 0,
+
x_chrs_max: usize,
+
y_chrs_max: usize,
+
fg_color: u32 = 0xFFFFFFFF,
+
bg_color: u32 = 0,
+
+
/// Create an instance given a framebuffer
+
/// Canvas must be exactly fb.width * fb.height * fb.bypp bytes
+
pub fn init(fb: Framebuffer, canvas: [*]u8) Console {
+
const font = psf2.Font.new(&fontdata);
+
return init_with_font(fb, canvas, font);
+
}
+
+
/// Create an instance given a framebuffer and font
+
pub fn init_with_font(fb: Framebuffer, canvas: [*]u8, font: psf2.Font) Console {
+
const font_hdr = font.getHdr();
+
return .{
+
.fb = fb,
+
.font = font,
+
.canvas = canvas,
+
// TODO: implement spacing between chars?
+
.x_chrs_max = fb.width / font_hdr.width,
+
.y_chrs_max = fb.height / font_hdr.height,
+
};
+
}
+
+
/// Write a string to the console
+
pub fn puts(self: *Console, msg: []const u8) usize {
+
var written: usize = 0;
+
+
const start_line, const num_lines = blk: {
+
const start_line = self.y_pos;
+
var scrolled: bool = false;
+
for (msg) |ch| {
+
// TODO: handle characters that failed to print
+
scrolled |= self.putc(ch) catch false;
+
written += 1;
+
}
+
if (scrolled) break :blk .{ 0, self.y_chrs_max };
+
break :blk .{ start_line, self.y_pos - start_line + 1 };
+
};
+
self.renderCanvas(start_line, num_lines);
+
return written;
+
}
+
+
// Copy in the given lines from the canvas to the framebuffer
+
fn renderCanvas(
+
self: *Console,
+
start_line: usize,
+
num_lines: usize,
+
) void {
+
const glyph_height: usize = @intCast(self.font.getHdr().height);
+
// Not necessarily fb pitch!
+
const canvas_pitch = self.fb.width * self.fb.bypp;
+
const byte_fb: [*]u8 = @ptrCast(self.fb.address);
+
+
if (canvas_pitch == self.fb.pitch) {
+
const src_buf = self.canvas[canvas_pitch * glyph_height * start_line ..][0 .. canvas_pitch * glyph_height * num_lines];
+
const dst_buf = byte_fb[self.fb.pitch * glyph_height * start_line ..][0 .. self.fb.pitch * glyph_height * num_lines];
+
@memcpy(dst_buf, src_buf);
+
} else {
+
// Unfortunately we have to copy line by line
+
var i: usize = 0;
+
const canvas_start = canvas_pitch * start_line * glyph_height;
+
const fb_start = self.fb.pitch * start_line * glyph_height;
+
while (i < num_lines * glyph_height) : (i += 1) {
+
const src_line = self.canvas[canvas_start + i * canvas_pitch ..][0..canvas_pitch];
+
const dst_line = byte_fb[fb_start + i * self.fb.pitch ..][0..canvas_pitch];
+
@memcpy(dst_line, src_line);
+
}
+
}
+
}
+
+
/// Write a character to the console, return true if scrolled
/// If putchar failed we did not scroll for sure
fn putc(self: *Console, ch: u8) !bool {
    switch (ch) {
        // Carriage returns are swallowed entirely.
        '\r' => return false,
        // Newline: back to column zero, then down one line.
        '\n' => {
            self.x_pos = 0;
            return self.advanceLine();
        },
        else => {},
    }

    // TODO: color palette and escape codes?
    try self.putchar(ch, self.x_pos, self.y_pos, self.fg_color, self.bg_color);
    self.x_pos += 1;

    // Wrap once the cursor runs off the right edge.
    if (self.x_pos >= self.x_chrs_max) {
        self.x_pos = 0;
        return self.advanceLine();
    }
    return false;
}

// Move the cursor down one line, scrolling if it fell off the bottom.
// Returns true if a scroll happened.
fn advanceLine(self: *Console) bool {
    self.y_pos += 1;
    if (self.y_pos >= self.y_chrs_max) {
        self.scrollUp();
        return true;
    }
    return false;
}
+
+
// Render one glyph into the canvas at character cell (x_pos, y_pos),
// using pre-converted (framebuffer-native) fg/bg pixel values.
// Errors only if the font has no glyph for `ch`.
fn putchar(self: *const Console, ch: u8, x_pos: usize, y_pos: usize, fg_val: u32, bg_val: u32) !void {
    // Index 0 = background, index 1 = foreground; pre-swapped so the
    // @bitCast below yields bytes in memory order on either endianness.
    const raw_color_choice: [2]u32 = [2]u32{
        if (are_we_le) bg_val else @byteSwap(bg_val),
        if (are_we_le) fg_val else @byteSwap(fg_val),
    };

    const font = self.font;
    const hdr = font.getHdr();

    // Each glyph row is bytes_per_line bytes; the initial mask selects
    // the most significant (leftmost) pixel bit of a row.
    const bytes_per_line = hdr.bytesPerLine();
    const mask_shamt: u5 = @truncate(bytes_per_line * 8 - 1);
    const mask_initial: u32 = @as(u32, 1) << mask_shamt;
    const glyph = try font.getGlyph(ch);

    // Offset into the canvas of the beginning of the character cell.
    // NOTE: the canvas is tightly packed, so its pitch is width * bypp
    // (not necessarily the framebuffer's pitch).
    const canvas_pitch: usize = self.fb.width * self.fb.bypp;
    var offset: usize = (y_pos * @as(usize, hdr.height) * canvas_pitch) + (x_pos * @as(usize, hdr.width) * self.fb.bypp);
    // run for every line
    var glyph_y: u32 = 0;
    var mask: u32 = 0;
    while (glyph_y < hdr.height) : (glyph_y += 1) {
        // initialize the mask and current line
        mask = mask_initial;
        // TODO: endian
        const line_value: u32 = std.mem.readVarInt(u32, glyph[glyph_y * bytes_per_line ..][0..bytes_per_line], .big);
        // offset into the canvas of the current pixel row
        var line_offset: usize = offset;
        var glyph_x: u32 = 0;
        while (glyph_x < hdr.width) : (glyph_x += 1) {
            // Write the fg or bg color depending on whether the glyph
            // bit under the mask is set.
            const color: [4]u8 = @bitCast(raw_color_choice[@intFromBool(line_value & mask != 0)]);
            // Only the first bypp bytes of the pixel are written, so
            // formats narrower than 32bpp are handled too.
            @memcpy(self.canvas[line_offset..][0..self.fb.bypp], color[0..]);
            // Move right a pixel
            line_offset += self.fb.bypp;
            mask >>= 1;
        }
        // Move down a line
        offset += canvas_pitch;
    }
}
+
+
// Set the fg and bg color.
// Inputs are plain _RGB u32 values; they are converted once here into
// the framebuffer's native pixel format.
pub fn setColor(self: *Console, new_fg: u32, new_bg: u32) void {
    self.bg_color = self.convertColor(new_bg);
    self.fg_color = self.convertColor(new_fg);
}
+
+
// Convert a normal _RGB u32 to the actual framebuffer format.
// Each 8-bit source channel is rescaled to that channel's bit width
// (mask_size) and then shifted into place (mask_shift).
fn convertColor(self: *const Console, color: u32) u32 {
    // The color value also needs to be scaled. For example,
    // if it's 10 bits per color and we're starting from 8 bits,
    // Full bright will only be 0xFF/0x3FF or about 25% brightness.
    // To fix this hypothetical, shift left by (10 - 8). This isn't
    // perfectly accurate but close enough.
    const red_left_shift: u5 = @truncate(self.fb.red_mask_size);
    // BUG FIX: green/blue previously read red_mask_size, so any mode
    // where the channel widths differ scaled green and blue wrongly.
    const green_left_shift: u5 = @truncate(self.fb.green_mask_size);
    const blue_left_shift: u5 = @truncate(self.fb.blue_mask_size);

    // Get our source RGB 888
    const right_shift = 8;
    const red_src: u32 = (color >> 16) & 0xFF;
    const green_src: u32 = (color >> 8) & 0xFF;
    const blue_src: u32 = color & 0xFF;

    // These shifts are the offsets to place each color.
    const red_dest_shift: u5 = @truncate(self.fb.red_mask_shift);
    const green_dest_shift: u5 = @truncate(self.fb.green_mask_shift);
    const blue_dest_shift: u5 = @truncate(self.fb.blue_mask_shift);

    // Rescale each channel from 8 bits to mask_size bits, then position it.
    const red_dst = switch (std.math.order(red_left_shift, right_shift)) {
        .gt => red_src << (red_left_shift - right_shift),
        .lt => red_src >> (right_shift - red_left_shift),
        .eq => red_src,
    } << red_dest_shift;

    const green_dst = switch (std.math.order(green_left_shift, right_shift)) {
        .gt => green_src << (green_left_shift - right_shift),
        .lt => green_src >> (right_shift - green_left_shift),
        .eq => green_src,
    } << green_dest_shift;

    const blue_dst = switch (std.math.order(blue_left_shift, right_shift)) {
        .gt => blue_src << (blue_left_shift - right_shift),
        .lt => blue_src >> (right_shift - blue_left_shift),
        .eq => blue_src,
    } << blue_dest_shift;

    return red_dst | green_dst | blue_dst;
}
+
+
// Scroll the canvas up by one character line and park the cursor at the
// start of the (now blank) last line.
// Make sure to set damage to the entire screen!
fn scrollUp(self: *Console) void {
    const row_height: usize = @intCast(self.font.getHdr().height);
    const line_bytes: usize = self.fb.width * self.fb.bypp * row_height;
    const kept_bytes = line_bytes * (self.y_chrs_max - 1);

    // Shift character lines 1..n up into 0..n-1. The regions overlap with
    // dest below source, which is exactly what copyForwards permits.
    std.mem.copyForwards(u8, self.canvas[0..kept_bytes], self.canvas[line_bytes..][0..kept_bytes]);

    // Blank the freshly exposed bottom line.
    @memset(self.canvas[kept_bytes..][0..line_bytes], 0);

    self.x_pos = 0;
    self.y_pos = self.y_chrs_max - 1;
}
+
+
// Get a writer with optional buffering, wrapping this console in the
// std.Io.Writer interface.
pub fn writer(self: *Console, buffer: []u8) Writer {
    return .init(self, buffer);
}
+
+
// Writer with the new std.Io.Writer interface.
// `interface.buffer` holds not-yet-drained bytes in buffer[0..end].
pub const Writer = struct {
    console: *Console,
    interface: std.Io.Writer,

    pub fn init(console: *Console, buffer: []u8) Writer {
        return .{
            .console = console,
            .interface = .{
                .buffer = buffer,
                .vtable = &.{ .drain = Writer.drain },
            },
        };
    }

    pub fn drain(w: *std.Io.Writer, data: []const []const u8, splat: usize) !usize {
        // fieldParentPtr is so cool
        const self: *Writer = @fieldParentPtr("interface", w);

        // First flush anything sitting in the interface buffer. `w.end`
        // counts buffered bytes, so the pending content is buffer[0..end].
        // BUG FIX: the old code printed buffer[end..] (the *unused* tail,
        // potentially uninitialized) and then grew `end` instead of
        // resetting it, so real buffered bytes were never consumed.
        if (w.end != 0) {
            _ = self.console.puts(w.buffer[0..w.end]);
            w.end = 0;
        }

        // Then write out the caller's slices; per the drain contract the
        // final slice is repeated `splat` times, and the return value
        // counts only bytes consumed from `data`.
        var consumed: usize = 0;
        for (data, 0..) |slice, i| {
            const reps: usize = if (i == data.len - 1) splat else 1;
            for (0..reps) |_| consumed += self.console.puts(slice);
        }
        return consumed;
    }
};
+
};