1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
|
//! # Memory Management Module
//!
//! This module provides memory management primitives for the Strix OS kernel,
//! including page table access and physical frame allocation.
//!
//! ## x86-64 Paging Overview
//!
//! x86-64 uses a 4-level page table hierarchy to translate virtual addresses
//! to physical addresses:
//!
//! ```text
//! Virtual Address (48-bit canonical):
//! ┌─────────┬─────────┬─────────┬─────────┬─────────┬──────────────┐
//! │ Sign │ Level 4 │ Level 3 │ Level 2 │ Level 1 │ Offset │
//! │ Extend │ Index │ Index │ Index │ Index │ (12 bits) │
//! │(16 bits)│ (9 bits)│ (9 bits)│ (9 bits)│ (9 bits)│ │
//! └─────────┴─────────┴─────────┴─────────┴─────────┴──────────────┘
//!
//! Translation:
//! CR3 Register → Level 4 Table → Level 3 Table → Level 2 Table → Level 1 Table → Physical Frame
//! ```
//!
//! ### Page Table Levels
//!
//! | Level | Also Called | Each Entry Maps |
//! |-------|-------------|-----------------|
//! | 4 | PML4 | 512 GiB |
//! | 3 | PDPT | 1 GiB |
//! | 2 | PD | 2 MiB |
//! | 1 | PT | 4 KiB (page) |
//!
//! ### Physical Memory Mapping
//!
//! The bootloader maps all physical memory to virtual addresses starting at a
//! fixed offset. This allows the kernel to access any physical address by adding
//! this offset:
//!
//! ```text
//! Virtual Address = Physical Address + physical_memory_offset
//! ```
//!
//! This mapping is essential because page tables contain *physical* addresses,
//! but the CPU can only access *virtual* addresses.
//!
//! ## Module Components
//!
//! - [`init()`]: Creates an [`OffsetPageTable`] for virtual memory management
//! - [`BootInfoFrameAllocator`]: Allocates physical frames from the memory map
//! - [`EmptyFrameAllocator`]: A no-op allocator for testing
//! - [`address_space`]: Per-process page table management
pub mod address_space;
use core::sync::atomic::{AtomicU64, Ordering};
use bootloader::bootinfo::{MemoryMap, MemoryRegionType};
use x86_64::{
structures::paging::{FrameAllocator, OffsetPageTable, PageTable, PhysFrame, Size4KiB},
PhysAddr, VirtAddr,
};
/// Physical memory offset: virtual address where physical address 0 is mapped.
///
/// Stored here so that [`address_space`] and other submodules can read it
/// without threading the `VirtAddr` through every call. Initialized by
/// [`init()`] and immutable thereafter. `0` serves as the "not yet
/// initialized" sentinel.
pub static PHYS_MEM_OFFSET: AtomicU64 = AtomicU64::new(0);

/// Returns the physical memory offset as a `u64`.
///
/// # Panics
///
/// Panics (in debug builds) if called before [`init()`], i.e. while the
/// offset still holds its sentinel value of 0.
#[inline]
pub fn phys_mem_offset() -> u64 {
    let offset = PHYS_MEM_OFFSET.load(Ordering::Relaxed);
    // Enforce the documented contract: the original body never panicked even
    // though the doc promised it would. Debug-only, so release builds pay
    // nothing for the check.
    debug_assert!(offset != 0, "phys_mem_offset() called before memory::init()");
    offset
}
/// Sets up the kernel's virtual-memory interface.
///
/// Wraps the currently active level 4 page table in an [`OffsetPageTable`],
/// which can then be used to map pages and translate addresses.
///
/// ## Physical Memory Offset
///
/// The bootloader maps *all* of physical memory contiguously into virtual
/// memory; `physical_memory_offset` is the virtual address at which physical
/// address 0 appears. With an offset of `0xFFFF_8000_0000_0000`, for example:
///
/// - Physical `0x1000` → Virtual `0xFFFF_8000_0000_1000`
/// - Physical `0x2000` → Virtual `0xFFFF_8000_0000_2000`
///
/// ## How It Works
///
/// 1. CR3 is read to obtain the physical address of the level 4 table
/// 2. Adding the physical memory offset yields its virtual address
/// 3. An `OffsetPageTable` is built around it, using the same offset for
///    every further translation
///
/// # Arguments
///
/// * `physical_memory_offset` - Virtual address where physical memory starts
///
/// # Returns
///
/// An [`OffsetPageTable`] able to translate virtual ↔ physical addresses and
/// modify page table mappings.
///
/// # Safety
///
/// 1. The caller must guarantee that the *entire* physical memory is mapped
///    into virtual memory at `physical_memory_offset`.
///
/// 2. This function must be called at most once; a second call would mint a
///    second `&mut` reference to the live page tables, which is undefined
///    behavior.
///
/// # Example
///
/// ```ignore
/// let phys_mem_offset = VirtAddr::new(boot_info.physical_memory_offset);
/// let mut mapper = unsafe { memory::init(phys_mem_offset) };
///
/// // Now you can use mapper to translate addresses or create mappings
/// let phys = mapper.translate_addr(VirtAddr::new(0x1000));
/// ```
pub unsafe fn init(physical_memory_offset: VirtAddr) -> OffsetPageTable<'static> {
    // Publish the offset so submodules (e.g. `address_space`) can read it.
    PHYS_MEM_OFFSET.store(physical_memory_offset.as_u64(), Ordering::Relaxed);

    // SAFETY: the caller upholds both preconditions — all physical memory is
    // mapped at `physical_memory_offset`, and this function runs only once.
    let level_4_table = unsafe { active_level_4_table(physical_memory_offset) };

    // SAFETY: `level_4_table` really is the active PML4, and the offset is
    // the one physical memory is mapped at (caller's guarantee above).
    unsafe { OffsetPageTable::new(level_4_table, physical_memory_offset) }
}
/// Returns a mutable reference to the active level 4 page table.
///
/// This function reads the CR3 register to find the physical address of the
/// currently active level 4 page table (PML4), then converts it to a virtual
/// address using the physical memory offset.
///
/// ## CR3 Register
///
/// The CR3 (Control Register 3) contains:
/// - Bits 12-51: Physical address of the level 4 page table (4 KiB aligned)
/// - Bits 3 (PWT) and 4 (PCD): cache-control flags for accesses to the
///   top-level table — valid when `CR4.PCIDE = 0`. When PCIDs are enabled,
///   bits 0-11 instead hold the current PCID; note the PCID *enable* bit
///   lives in CR4 (bit 17), not in CR3.
///
/// ## Why Level 4?
///
/// x86-64 uses a 4-level page table hierarchy. The level 4 table (PML4) is the
/// root of this hierarchy and is pointed to by CR3. Each level 4 entry covers
/// 512 GiB of virtual address space.
///
/// # Arguments
///
/// * `physical_memory_offset` - The virtual address where physical memory starts
///
/// # Returns
///
/// A mutable reference to the active level 4 page table.
///
/// # Safety
///
/// This function is unsafe because:
///
/// 1. The caller must ensure that physical memory is mapped at the given offset.
///
/// 2. This function must only be called once to avoid creating multiple
///    mutable references to the same page table (undefined behavior). More
///    precisely: no other reference to the table may be live while the
///    returned `&'static mut` exists.
unsafe fn active_level_4_table(physical_memory_offset: VirtAddr) -> &'static mut PageTable {
    use x86_64::registers::control::Cr3;
    // Read CR3 to get the physical address of the level 4 page table
    let (level_4_table_frame, _flags) = Cr3::read();
    // Convert the frame's physical start address to a virtual address
    let phys = level_4_table_frame.start_address();
    let virt = physical_memory_offset + phys.as_u64();
    // Convert to a raw pointer and dereference
    let page_table_ptr: *mut PageTable = virt.as_mut_ptr();
    // SAFETY: Caller guarantees this is called only once and physical memory
    // is correctly mapped.
    unsafe { &mut *page_table_ptr }
}
/// A frame allocator whose every allocation fails.
///
/// Handy wherever a [`FrameAllocator`] is required but no frame should
/// actually be handed out:
///
/// - exercising page-table code without touching real memory
/// - checking that callers cope gracefully with allocation failure
/// - early boot, before the memory map is available
///
/// # Example
///
/// ```ignore
/// let mut allocator = EmptyFrameAllocator;
/// assert!(allocator.allocate_frame().is_none());
/// ```
pub struct EmptyFrameAllocator;

// SAFETY: an allocator that never yields a frame trivially cannot yield an
// invalid or already-in-use one.
unsafe impl FrameAllocator<Size4KiB> for EmptyFrameAllocator {
    /// Unconditionally reports allocation failure.
    fn allocate_frame(&mut self) -> Option<PhysFrame> {
        None
    }
}
/// A frame allocator that uses the bootloader's memory map.
///
/// This allocator provides physical memory frames (4 KiB each) from regions
/// marked as "Usable" in the memory map. It's a simple bump allocator that
/// never frees frames.
///
/// ## Memory Map Regions
///
/// The bootloader provides a memory map describing physical memory:
///
/// | Region Type       | Description                          |
/// |-------------------|--------------------------------------|
/// | `Usable`          | Available for kernel use ✓           |
/// | `InUse`           | Used by bootloader/kernel code       |
/// | `Reserved`        | Reserved by hardware/firmware        |
/// | `AcpiReclaimable` | ACPI tables (usable after parsing)   |
/// | `AcpiNvs`         | ACPI non-volatile storage            |
/// | `BadMemory`       | Faulty memory regions                |
/// | `BootloaderReclaimable` | Usable after bootloader cleanup |
///
/// ## Allocation Strategy
///
/// This is a simple bump allocator:
/// 1. Maintain an index into the usable frames
/// 2. On each allocation, return the frame at `index` and increment
/// 3. Never free frames (no deallocation support)
///
/// ## Limitations
///
/// - **No Deallocation**: Frames cannot be freed. This is acceptable for early
///   boot but will eventually need to be replaced with a proper allocator.
/// - **Linear Scan**: Each allocation iterates through the memory map to find
///   the nth usable frame, which is O(n).
/// - **No Fragmentation Handling**: Does not coalesce or reuse freed memory.
pub struct BootInfoFrameAllocator {
    /// The bootloader-provided memory map. `'static` because the bootloader's
    /// boot info lives for the whole kernel runtime.
    memory_map: &'static MemoryMap,
    /// Index of the next frame to allocate — i.e. the number of frames handed
    /// out so far. Used as the `n` for `usable_frames().nth(n)` and
    /// incremented after each successful allocation.
    next: usize,
}
impl BootInfoFrameAllocator {
    /// Builds an allocator that hands out frames from the `Usable` regions
    /// of the bootloader's memory map.
    ///
    /// # Arguments
    ///
    /// * `memory_map` - The memory map from the bootloader's [`BootInfo`]
    ///
    /// # Returns
    ///
    /// A new `BootInfoFrameAllocator` ready to allocate frames.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that:
    ///
    /// 1. The memory map accurately describes the machine's physical memory.
    ///
    /// 2. Every frame inside a `Usable` region really is unused — handing out
    ///    a frame that actually holds kernel or bootloader data would let
    ///    callers overwrite it.
    ///
    /// 3. Only one allocator is ever created per memory map. Two allocators
    ///    over the same map would hand out the same frames twice, corrupting
    ///    memory.
    ///
    /// # Example
    ///
    /// ```ignore
    /// let frame_allocator = unsafe {
    ///     BootInfoFrameAllocator::init(&boot_info.memory_map)
    /// };
    /// ```
    pub unsafe fn init(memory_map: &'static MemoryMap) -> Self {
        Self { memory_map, next: 0 }
    }

    /// Returns an iterator over every 4 KiB frame inside the map's `Usable`
    /// regions, in memory-map order.
    ///
    /// The iterator is rebuilt lazily on each call; nothing is cached.
    ///
    /// # Returns
    ///
    /// An iterator yielding one [`PhysFrame`] per 4 KiB frame of usable
    /// physical memory.
    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
        self.memory_map
            .iter()
            // Keep only the regions the bootloader marked free for kernel use.
            .filter(|region| region.region_type == MemoryRegionType::Usable)
            // Walk each region's address range one page (4096 bytes) at a time.
            .flat_map(|region| (region.range.start_addr()..region.range.end_addr()).step_by(4096))
            // Wrap each page-aligned address in a PhysFrame.
            .map(|addr| PhysFrame::containing_address(PhysAddr::new(addr)))
    }
}
/// Implementation of the [`FrameAllocator`] trait for [`BootInfoFrameAllocator`].
///
/// This allows the allocator to be used with the x86_64 crate's mapping functions.
unsafe impl FrameAllocator<Size4KiB> for BootInfoFrameAllocator {
    /// Allocates a single physical frame.
    ///
    /// Returns the next unused frame from the usable memory regions, or
    /// `None` once every usable frame has been handed out.
    ///
    /// # Performance Note
    ///
    /// `Iterator::nth()` re-walks the memory map on every call, so each
    /// allocation is O(n) in the number of frames allocated so far. That is
    /// acceptable for early boot; a production allocator should cache the
    /// usable frames.
    ///
    /// # Returns
    ///
    /// - `Some(frame)`: A physical frame that is now allocated
    /// - `None`: No more frames available
    fn allocate_frame(&mut self) -> Option<PhysFrame> {
        // Get the nth usable frame (where n = self.next).
        let frame = self.usable_frames().nth(self.next)?;
        // Advance the counter only on success. The previous version bumped
        // `next` even when allocation failed, needlessly walking it past the
        // end of usable memory (and, in a pathological retry loop, toward
        // usize overflow).
        self.next += 1;
        Some(frame)
    }
}
|