Microkernel thing OS experiment (Zig ⚡)

apic: get ready for scheduling

While getting the TSC rate is easy on Intel (CPUID reports it directly), on AMD
I need to set up a delay loop against the known PIT frequency to measure it.
All of that logic will land in a future commit.
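As a rough sketch of that AMD-side fallback (this is not code from this commit: outb/inb are hypothetical stand-ins for the port helpers, and it borrows the rdtsc helper this commit adds to instructions/root.zig), the idea is to program PIT channel 2 for a known interval and count TSC ticks across it:

const arch = @import("root.zig"); // assumes the sketch sits next to apic.zig

// Sketch only: measure the TSC rate against PIT channel 2 (1.193182 MHz).
inline fn outb(port: u16, value: u8) void {
    asm volatile ("outb %[value], %[port]"
        :
        : [value] "{al}" (value),
          [port] "N{dx}" (port),
    );
}

inline fn inb(port: u16) u8 {
    return asm volatile ("inb %[port], %[result]"
        : [result] "={al}" (-> u8),
        : [port] "N{dx}" (port),
    );
}

pub fn measureTscHz() u64 {
    const pit_hz: u64 = 1_193_182;
    const ticks: u16 = 11932; // roughly 10 ms at the PIT base frequency

    // Gate channel 2 on (bit 0 of port 0x61), speaker output off (bit 1).
    outb(0x61, (inb(0x61) & ~@as(u8, 0x02)) | 0x01);
    // Channel 2, lobyte/hibyte access, mode 0 (interrupt on terminal count).
    outb(0x43, 0xB0);
    outb(0x42, @as(u8, @truncate(ticks)));
    outb(0x42, @as(u8, @truncate(ticks >> 8)));

    const start = arch.instructions.rdtsc();
    // Wait for channel 2's OUT pin (bit 5 of port 0x61) to go high.
    while (inb(0x61) & 0x20 == 0) {}
    const end = arch.instructions.rdtsc();

    // Elapsed time is ticks / pit_hz seconds, so TSC Hz = delta * pit_hz / ticks.
    return (end - start) * pit_hz / ticks;
}

The same PIT window could calibrate the APIC timer by sampling current_count instead of the TSC.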

pci.express 495d11bf c9b9b1c6

verified
Changed files
+337 -99
+162
components/ukernel/arch/amd64/apic.zig
···
+const std = @import("std");
+const arch = @import("root.zig");
+const log = std.log.scoped(.apic);
+
+// Must instantiate this!
+pub const LAPIC = union(enum) {
+    xapic: [*]volatile u8,
+    x2apic,
+
+    const Self = @This();
+
+    // ID Register
+    pub const IdRegister = packed struct(u32) {
+        _reserved0: u24 = 0,
+        id: u8,
+    };
+
+    pub fn getIdRegister(lapic: Self) IdRegister {
+        return @bitCast(lapic.getRegister(.lapic_id));
+    }
+
+    // Version Register
+    pub const VersionRegister = packed struct(u32) {
+        version: u8,
+        _reserved0: u8 = 0,
+        max_lvt_entry: u8,
+        support_for_eoi_broadcast_suppression: bool,
+        _reserved1: u7 = 0,
+    };
+
+    pub fn getVersionRegister(lapic: Self) VersionRegister {
+        return @bitCast(lapic.getRegister(.version));
+    }
+
+    // Spurious Interrupt
+    pub const SpuriousInterruptRegister = packed struct(u32) {
+        idt_entry: u8,
+        apic_soft_enable: bool,
+        focus_processor_checking: bool = false,
+        _reserved0: u2 = 0,
+        eoi_broadcast_suppression: bool = false,
+        _reserved1: u19 = 0,
+    };
+
+    pub fn getSpuriousInterruptRegister(lapic: Self) SpuriousInterruptRegister {
+        return @bitCast(lapic.getRegister(.spurious_vector));
+    }
+
+    pub fn setSpuriousInterruptRegister(lapic: Self, val: SpuriousInterruptRegister) void {
+        lapic.setRegister(.spurious_vector, @bitCast(val));
+    }
+
+    pub fn getRegister(lapic: Self, reg: Register) u32 {
+        switch (lapic) {
+            .xapic => |base| {
+                const ptr: *align(0x10) volatile u32 = @ptrCast(@alignCast(base + reg.xapic()));
+                return ptr.*;
+            },
+            .x2apic => {
+                return arch.registers.readMSR(u32, reg.x2apic());
+            },
+        }
+    }
+
+    pub fn setRegister(lapic: Self, reg: Register, value: u32) void {
+        switch (lapic) {
+            .xapic => |base| {
+                const ptr: *align(0x10) volatile u32 = @ptrCast(@alignCast(base + reg.xapic()));
+                ptr.* = value;
+            },
+            .x2apic => {
+                arch.registers.writeMSR(u32, reg.x2apic(), value);
+            },
+        }
+    }
+
+    pub const Register = enum(u32) {
+        // From Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3
+        lapic_id = 0x2,
+        version = 0x3,
+        task_priority = 0x8,
+        process_priority = 0xa,
+        eoi = 0xb,
+        logical_destination = 0xd,
+        spurious_vector = 0xf,
+
+        in_service_0_31 = 0x10,
+        in_service_63_32 = 0x11,
+        in_service_95_64 = 0x12,
+        in_service_127_96 = 0x13,
+        in_service_159_128 = 0x14,
+        in_service_191_160 = 0x15,
+        in_service_223_192 = 0x16,
+        in_service_255_224 = 0x17,
+
+        trigger_mode_0_31 = 0x18,
+        trigger_mode_63_32 = 0x19,
+        trigger_mode_95_64 = 0x1a,
+        trigger_mode_127_96 = 0x1b,
+        trigger_mode_159_128 = 0x1c,
+        trigger_mode_191_160 = 0x1d,
+        trigger_mode_223_192 = 0x1e,
+        trigger_mode_255_224 = 0x1f,
+
+        interrupt_request_0_31 = 0x20,
+        interrupt_request_63_32 = 0x21,
+        interrupt_request_95_64 = 0x22,
+        interrupt_request_127_96 = 0x23,
+        interrupt_request_159_128 = 0x24,
+        interrupt_request_191_160 = 0x25,
+        interrupt_request_223_192 = 0x26,
+        interrupt_request_255_224 = 0x27,
+
+        error_status = 0x28,
+        lvt_cmi = 0x2f,
+
+        interrupt_command_0_31 = 0x30,
+        interrupt_command_32_63 = 0x31,
+        lvt_timer = 0x32,
+        lvt_thermal_sensor = 0x33,
+        lvt_performance_monitoring = 0x34,
+        lvt_lint0 = 0x35,
+        lvt_lint1 = 0x36,
+        lvt_error = 0x37,
+        initial_count = 0x38,
+        current_count = 0x39,
+        divide_configuration = 0x3e,
+        self_ipi = 0x3f,
+
+        const Self = @This();
+
+        // Get an offset to apply to the xAPIC base
+        pub fn xapic(reg: Register) usize {
+            return @intFromEnum(reg) * 0x10;
+        }
+
+        // Get an MSR number to write to
+        pub fn x2apic(reg: Register) u32 {
+            return 0x800 | @intFromEnum(reg);
+        }
+    };
+};
+
+pub const init = struct {
+    // Get the APIC ready (call first)
+    pub fn initialSetup(lapic: LAPIC) void {
+        lapic.setSpuriousInterruptRegister(.{
+            .apic_soft_enable = true,
+            .idt_entry = 0xFF,
+        });
+        var lol = arch.instructions.cpuid.cpuid(0x15, 0x00);
+        lol = arch.instructions.cpuid.cpuid(0x40000010, 0x00);
+    }
+
+    // Calibrate the APIC timer
+
+};
+
+pub fn spurious_interrupt_handler(_: *arch.structures.Idt.InterruptStackFrame) callconv(.{ .x86_64_interrupt = .{} }) void {
+    log.warn("Got a spurious interrupt!", .{});
+}
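The two CPUID queries at the end of initialSetup are only probes for now: leaf 0x15 is Intel's TSC/core-crystal clock leaf, and 0x40000010 is the hypervisor timing leaf (TSC and APIC-bus frequencies in kHz on hypervisors that expose it). As a hypothetical helper (not part of this commit), the Intel path from the commit message could boil down to:

const arch = @import("root.zig");

// Sketch only: derive the TSC rate from CPUID leaf 0x15.
// EAX = ratio denominator, EBX = ratio numerator, ECX = crystal frequency in Hz.
pub fn tscHzFromCpuid() ?u64 {
    const leaf = arch.instructions.cpuid.cpuid(0x15, 0x00);
    if (leaf.eax == 0 or leaf.ebx == 0 or leaf.ecx == 0) return null;
    return @as(u64, leaf.ecx) * leaf.ebx / leaf.eax;
}

When the leaf reports a zero crystal frequency (as many client parts do), the code would fall back to a PIT-based loop like the one sketched under the commit message.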
+31 -47
components/ukernel/arch/amd64/boot.zig
···
const StandardGdt = arch.structures.gdt.StandardGdt;
const Tss = arch.structures.tss.Tss;
-var per_cpu_init_data: PerCpuInitData = .{};
-
pub const limine_requests = struct {
    export var start_marker: limine.RequestsStartMarker linksection(".limine_reqs_start") = .{};
    export var end_marker: limine.RequestsEndMarker linksection(".limine_reqs_end") = .{};
···
    }
    // Initialize per-cpu data (GDT and TSS)
-    per_cpu_init_data.init();
+    arch.per_cpu_init_data.init(limine_requests.mp.response.?.cpu_count);
    // Install the IDT
    initIdt();
···
    bootstrapAPs();
    // Set up our own GDT and TSS
-    const gdt = &per_cpu_init_data.gdt_buf[0];
+    const gdt = &arch.per_cpu_init_data.gdt_buf[0];
    gdt.* = .{};
-    const tss = &per_cpu_init_data.tss_buf[0];
+    const tss = &arch.per_cpu_init_data.tss_buf[0];
    // TSS rsp 0x3800
    tss.* = .{
        .rsp0 = 0x3800,
···
    gdt.tss_desc.set_tss_addr(tss);
    gdt.load();
    log.info("BSP successfully setup GDT+TSS!", .{});
+
+    log.info("Setting up scheduling...", .{});
+    // Initialize the APIC
+    // Map the APIC first!
+    const apic_base = common.mm.physToHHDM([*]volatile u8, 0xFEE0_0000);
+    common.mm.paging.mapPhys(.{
+        .vaddr = @intFromPtr(apic_base),
+        .paddr = 0xFEE0_0000,
+        .size = 0x1000,
+        .memory_type = .DeviceUncacheable,
+        .perms = .{
+            .executable = false,
+            .userspace_accessible = false,
+            .writable = true,
+        },
+    }) catch @panic("apic bruh");
+    const apic: arch.apic.LAPIC = .{ .xapic = apic_base };
+    arch.apic.init.initialSetup(apic);
    log.info("Allocating code for userspace...", .{});
···
}
pub fn initIdt() void {
-    const idt_addr: usize = @intFromPtr(per_cpu_init_data.idt);
+    const idt_addr: usize = @intFromPtr(arch.per_cpu_init_data.idt);
    // Install the known exception handlers
-    per_cpu_init_data.idt.breakpoint.installHandler(breakpoint_handler);
-    per_cpu_init_data.idt.double_fault.installHandler(double_fault);
-    per_cpu_init_data.idt.general_protection_fault.installHandler(gpf);
-    per_cpu_init_data.idt.page_fault.installHandler(page_fault);
+    arch.per_cpu_init_data.idt.breakpoint.installHandler(breakpoint_handler);
+    arch.per_cpu_init_data.idt.double_fault.installHandler(double_fault);
+    arch.per_cpu_init_data.idt.general_protection_fault.installHandler(gpf);
+    arch.per_cpu_init_data.idt.page_fault.installHandler(page_fault);
+    arch.per_cpu_init_data.idt.interrupts[0xFF - 32].installHandler(arch.apic.spurious_interrupt_handler);
    // Load the Idt Register
    const reg: Idt.Idtr = .{ .addr = idt_addr, .limit = @sizeOf(Idt) - 1 };
···
// TODO: update the type reflection thing to make a custom
// function type for the ISR
-pub const PageFaultErrorCode = packed struct {
+pub const PageFaultErrorCode = packed struct(u64) {
    present: bool,
    write: bool,
    user: bool,
···
fn ap_init(mp_info: *limine.SmpMpFeature.MpInfo) callconv(.c) noreturn {
    // Set up the IDT
-    const idt_addr: usize = @intFromPtr(per_cpu_init_data.idt);
+    const idt_addr: usize = @intFromPtr(arch.per_cpu_init_data.idt);
    const reg: Idt.Idtr = .{ .addr = idt_addr, .limit = @sizeOf(Idt) - 1 };
    reg.load();
    // Set up our GDT and TSS
-    const gdt = &per_cpu_init_data.gdt_buf[mp_info.processor_id];
+    const gdt = &arch.per_cpu_init_data.gdt_buf[mp_info.processor_id];
    gdt.* = .{};
-    const tss = &per_cpu_init_data.tss_buf[mp_info.processor_id];
+    const tss = &arch.per_cpu_init_data.tss_buf[mp_info.processor_id];
    tss.* = .{};
    gdt.tss_desc.set_tss_addr(tss);
···
    arch.instructions.die();
}
-
-const PerCpuInitData = struct {
-    gdt_buf: []StandardGdt = undefined,
-    tss_buf: []Tss = undefined,
-    idt: *Idt = undefined,
-
-    const Self = @This();
-    pub fn init(self: *Self) void {
-        // 1. Allocate an IDT
-        const idt_addr = common.init_data.bootmem.allocMem(@sizeOf(Idt)) catch |err| {
-            std.log.err("init PerCpuInitData: IDT alloc failed: {}", .{err});
-            @panic("rip bozo");
-        };
-        self.idt = @ptrFromInt(idt_addr);
-
-        // 2. Allocate space for GDT and TSS data
-        const cpu_count = limine_requests.mp.response.?.cpu_count;
-        const gdt_size = @sizeOf(StandardGdt);
-        const tss_size = @sizeOf(Tss);
-
-        const total_required_size = gdt_size * cpu_count + tss_size * cpu_count;
-        const buf: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(total_required_size) catch |err| {
-            std.log.err("init PerCpuInitData: GDT/TSS alloc failed: {}", .{err});
-            @panic("rip bozo");
-        });
-
-        // 3. Transmute and fill out the structure
-        const gdt_buf: [*]StandardGdt = @ptrCast(@alignCast(buf[0 .. gdt_size * cpu_count]));
-        const tss_buf: [*]Tss = @ptrCast(@alignCast(buf[gdt_size * cpu_count ..][0 .. tss_size * cpu_count]));
-        self.gdt_buf = gdt_buf[0..cpu_count];
-        self.tss_buf = tss_buf[0..cpu_count];
-    }
-};
-5
components/ukernel/arch/amd64/instructions.zig
···
-pub inline fn die() noreturn {
-    while (true) {
-        asm volatile ("hlt");
-    }
-}
···
+30
components/ukernel/arch/amd64/instructions/cpuid.zig
···
+pub inline fn cpuid(leaf: u32, sub: u32) DefaultResults {
+    var eax: u32 = undefined;
+    var ebx: u32 = undefined;
+    var edx: u32 = undefined;
+    var ecx: u32 = undefined;
+
+    asm volatile (
+        \\cpuid
+        : [eax] "={eax}" (eax),
+          [ebx] "={ebx}" (ebx),
+          [edx] "={edx}" (edx),
+          [ecx] "={ecx}" (ecx),
+        : [leaf] "{eax}" (leaf),
+          [sub] "{ecx}" (sub),
+    );
+
+    return .{
+        .eax = eax,
+        .ebx = ebx,
+        .ecx = ecx,
+        .edx = edx,
+    };
+}
+
+pub const DefaultResults = struct {
+    eax: u32 = 0,
+    ebx: u32 = 0,
+    edx: u32 = 0,
+    ecx: u32 = 0,
+};
+19
components/ukernel/arch/amd64/instructions/root.zig
···
+pub const cpuid = @import("cpuid.zig");
+
+pub inline fn die() noreturn {
+    while (true) {
+        asm volatile ("hlt");
+    }
+}
+
+pub inline fn rdtsc() u64 {
+    var low: u32 = undefined;
+    var high: u32 = undefined;
+
+    asm volatile ("rdtsc"
+        : [low] "={eax}" (low),
+          [high] "={edx}" (high),
+    );
+
+    return (@as(u64, high) << 32) | @as(u64, low);
+}
+1 -1
components/ukernel/arch/amd64/mm/paging.zig
···
pub const PageTable = extern struct {
    entries: [512]Entry,
-    pub const Entry = packed struct {
+    pub const Entry = packed struct(u64) {
        present: bool,
        writable: bool,
        user_accessible: bool,
···
+46 -38
components/ukernel/arch/amd64/registers.zig
···
    };
}
+pub inline fn readMSR(comptime T: type, num: u32) T {
+    return switch (@bitSizeOf(T)) {
+        32 => @bitCast(asm volatile ("rdmsr"
+            : [_] "={eax}" (-> u32),
+            : [_] "{ecx}" (num),
+        )),
+        64 => blk: {
+            var low_val: u32 = undefined;
+            var high_val: u32 = undefined;
+            asm volatile ("rdmsr"
+                : [_] "={eax}" (low_val),
+                  [_] "={edx}" (high_val),
+                : [_] "{ecx}" (num),
+            );
+            const combined_val = (@as(u64, high_val) << 32) | @as(u64, low_val);
+            break :blk @as(T, combined_val);
+        },
+        else => @compileError("Unimplemented for type of this size"),
+    };
+}
+
+pub inline fn writeMSR(comptime T: type, num: u32, val: T) void {
+    switch (@bitSizeOf(T)) {
+        32 => asm volatile ("wrmsr"
+            :
+            : [_] "{eax}" (val),
+              [_] "{edx}" (@as(u32, 0)),
+              [_] "{ecx}" (num),
+        ),
+        64 => {
+            const low_val: u32 = @truncate(@as(u64, val));
+            const high_val: u32 = @truncate(@as(u64, val >> 32));
+            asm volatile ("wrmsr"
+                :
+                : [_] "{eax}" (low_val),
+                  [_] "{edx}" (high_val),
+                  [_] "{ecx}" (num),
+            );
+        },
+        else => @compileError("Unimplemented for type of this size"),
+    }
+}
+
pub fn MSR(comptime T: type, comptime num: u32) type {
    return struct {
        pub fn read() T {
-            // TODO: switch on bit size to allow custom structs
-            switch (T) {
-                u32 => return asm volatile ("rdmsr"
-                    : [_] "={eax}" (-> u32),
-                    : [_] "{ecx}" (num),
-                ),
-                u64 => {
-                    var low_val: u32 = undefined;
-                    var high_val: u32 = undefined;
-                    asm volatile ("rdmsr"
-                        : [_] "={eax}" (low_val),
-                          [_] "={edx}" (high_val),
-                        : [_] "{ecx}" (num),
-                    );
-                    return (@as(u64, high_val) << 32) | @as(u64, low_val);
-                },
-                else => @compileError("Unimplemented for type"),
-            }
+            return readMSR(T, num);
        }
-        pub fn write(value: T) void {
-            switch (T) {
-                u32 => asm volatile ("wrmsr"
-                    :
-                    : [_] "{eax}" (value),
-                      [_] "{edx}" (@as(u32, 0)),
-                      [_] "{ecx}" (num),
-                ),
-                u64 => {
-                    const low_val: u32 = @truncate(value);
-                    const high_val: u32 = @truncate(value >> 32);
-                    asm volatile ("wrmsr"
-                        :
-                        : [_] "{eax}" (low_val),
-                          [_] "{edx}" (high_val),
-                          [_] "{ecx}" (num),
-                    );
-                },
-                else => @compileError("Unimplemented for type"),
-            }
+        pub fn write(val: T) void {
+            writeMSR(T, num, val);
        }
    };
}
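Nothing in this commit enables x2APIC mode yet, but the new readMSR/writeMSR helpers are what that would go through. A hedged sketch (not part of this commit): bit 11 (global enable) and bit 10 (x2APIC enable) of the IA32_APIC_BASE MSR (0x1B) switch the LAPIC into x2APIC mode, after which the LAPIC .x2apic path and its 0x800-range MSRs become valid.

const registers = @import("registers.zig");

const IA32_APIC_BASE: u32 = 0x1B;

// Sketch only: a real implementation would first check CPUID.01H:ECX bit 21
// to confirm x2APIC support before setting the enable bit.
pub fn enableX2Apic() void {
    var base = registers.readMSR(u64, IA32_APIC_BASE);
    base |= (1 << 11); // xAPIC global enable
    base |= (1 << 10); // x2APIC enable (EXTD)
    registers.writeMSR(u64, IA32_APIC_BASE, base);
}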
+41 -1
components/ukernel/arch/amd64/root.zig
···
+pub const apic = @import("apic.zig");
pub const boot = @import("boot.zig");
-pub const instructions = @import("instructions.zig");
+pub const instructions = @import("instructions/root.zig");
pub const mm = @import("mm/root.zig");
+pub const port = @import("port.zig");
pub const structures = @import("structures/root.zig");
pub const registers = @import("registers.zig");
const common = @import("common");
···
    .queryPageSize = pageSize,
};
pub const panic = std.debug.FullPanic(common.aux.panic);
+
+pub var per_cpu_init_data: PerCpuInitData = .{};
+
+const PerCpuInitData = struct {
+    const StandardGdt = structures.gdt.StandardGdt;
+    const Tss = structures.tss.Tss;
+    const Idt = structures.Idt;
+
+    gdt_buf: []StandardGdt = undefined,
+    tss_buf: []Tss = undefined,
+    idt: *Idt = undefined,
+
+    const Self = @This();
+    pub fn init(self: *Self, cpu_count: u64) void {
+        // 1. Allocate an IDT
+        const idt_addr = common.init_data.bootmem.allocMem(@sizeOf(Idt)) catch |err| {
+            std.log.err("init PerCpuInitData: IDT alloc failed: {}", .{err});
+            @panic("rip bozo");
+        };
+        self.idt = @ptrFromInt(idt_addr);
+
+        // 2. Allocate space for GDT and TSS data
+        const gdt_size = @sizeOf(StandardGdt);
+        const tss_size = @sizeOf(Tss);
+
+        const total_required_size = gdt_size * cpu_count + tss_size * cpu_count;
+        const buf: [*]u8 = @ptrFromInt(common.init_data.bootmem.allocMem(total_required_size) catch |err| {
+            std.log.err("init PerCpuInitData: GDT/TSS alloc failed: {}", .{err});
+            @panic("rip bozo");
+        });
+
+        // 3. Transmute and fill out the structure
+        const gdt_buf: [*]StandardGdt = @ptrCast(@alignCast(buf[0 .. gdt_size * cpu_count]));
+        const tss_buf: [*]Tss = @ptrCast(@alignCast(buf[gdt_size * cpu_count ..][0 .. tss_size * cpu_count]));
+        self.gdt_buf = gdt_buf[0..cpu_count];
+        self.tss_buf = tss_buf[0..cpu_count];
+    }
+};
comptime {
    // Entry point (_start)
···
+2 -2
components/ukernel/arch/amd64/structures/Idt.zig
···
const FuncType = @Type(FunctionTypeInfo);
-pub const Options = packed struct {
+pub const Options = packed struct(u16) {
    /// Interrupt Stack Table Index
    ist_index: u3,
    _reserved: u5 = 0,
···
}
/// IDT Register
-pub const Idtr = packed struct {
+pub const Idtr = packed struct(u80) {
    limit: u16,
    addr: u64,
···
+5 -5
components/ukernel/arch/amd64/structures/gdt.zig
···
const std = @import("std");
const arch = @import("../root.zig");
-pub const Descriptor = packed struct {
+pub const Descriptor = packed struct(u64) {
    limit_low: u16 = 0,
    base_low: u16 = 0,
    base_mid: u8 = 0,
···
    const Self = @This();
-    pub const Access = packed struct {
+    pub const Access = packed struct(u8) {
        // Accessed
        accessed: bool = true,
        // Readable/Writable
···
    };
};
-pub const Flags = packed struct {
+pub const Flags = packed struct(u4) {
    // Reserved
    _reserved: u1 = 0,
    // Long Mode code flag
···
    }
};
-pub const Gdtr = packed struct {
+pub const Gdtr = packed struct(u80) {
    limit: u16,
    base: u64,
···
};
const TssDescriptor = extern struct {
-    const Low = packed struct {
+    const Low = packed struct(u64) {
        limit_low: u16 = 0,
        base_low: u16 = 0,
        base_mid: u8 = 0,