diff --git a/kernel/memory/Memory.h b/kernel/memory/Memory.h index 829a671c..6cd1c03c 100644 --- a/kernel/memory/Memory.h +++ b/kernel/memory/Memory.h @@ -5,6 +5,7 @@ #include "../kstd/kstddef.h" #include "../api/page_size.h" +#include "../kstd/kstdlib.h" #define PAGING_4KiB 0 #define PAGING_4MiB 1 @@ -41,6 +42,22 @@ struct VirtualRange { size_t size; [[nodiscard]] VirtualAddress end() const { return start + size; } [[nodiscard]] bool contains(VirtualAddress address) const { return address >= start && address < end(); } + [[nodiscard]] bool overlaps(VirtualRange range) const { return max(start, range.start) < min(end(), range.end()); } +}; + +/** A struct to be used for the keys in memory maps. **/ +struct ComparableVirtualRange: public VirtualRange { + ComparableVirtualRange(VirtualAddress start, size_t size): VirtualRange(start, size) {} + ComparableVirtualRange(VirtualAddress start): VirtualRange(start, 1) {} + ComparableVirtualRange(VirtualRange range): VirtualRange(range.start, range.size) {} + + bool operator<(ComparableVirtualRange other) const { + return start < other.start; + } + + bool operator==(ComparableVirtualRange other) const { + return overlaps(other); + } }; struct PageFault { diff --git a/kernel/memory/VMSpace.cpp b/kernel/memory/VMSpace.cpp index 711f894b..10d2bb2a 100644 --- a/kernel/memory/VMSpace.cpp +++ b/kernel/memory/VMSpace.cpp @@ -17,71 +17,52 @@ const VMProt VMSpace::default_prot = { VMSpace::VMSpace(VirtualAddress start, size_t size, PageDirectory& page_directory): m_start(start), m_size(size), - m_region_map(new VMSpaceRegion {.start = start, .size = size, .used = false, .next = nullptr, .prev = nullptr}), m_page_directory(page_directory) {} -VMSpace::~VMSpace() { - auto cur_region = m_region_map; - while(cur_region) { - auto next = cur_region->next; - delete cur_region; - cur_region = next; - } -} +VMSpace::~VMSpace() = default; kstd::Arc VMSpace::fork(PageDirectory& page_directory, kstd::vector>& regions_vec) { LOCK(m_lock); auto 
new_space = kstd::Arc(new VMSpace(m_start, m_size, page_directory)); new_space->m_used = m_used; - delete new_space->m_region_map; // Clone regions - auto cur_region = m_region_map; - VMSpaceRegion* prev_new_region = nullptr; - while(cur_region) { - // Clone the VMSpaceRegion - auto new_region = new VMSpaceRegion(*cur_region); - if(cur_region == m_region_map) - new_space->m_region_map = new_region; - new_region->prev = prev_new_region; - if(prev_new_region) - prev_new_region->next = new_region; - prev_new_region = new_region; - - // Clone the vmRegion - if(cur_region->vmRegion) { - auto region = cur_region->vmRegion; - kstd::Arc new_object = region->object(); - // Mark as CoW / share if necessary - switch(region->object()->fork_action()) { - case VMObject::ForkAction::BecomeCoW: { - auto new_object_res = region->object()->clone(); - m_page_directory.map(*region); - if(new_object_res.is_error()) { - KLog::err("VMSpace", "Could not clone a VMObject: %d!", new_object_res.code()); - break; - } - new_object = new_object_res.value(); - [[fallthrough]]; - } - case VMObject::ForkAction::Share: { - auto new_vmRegion = kstd::Arc::make( - new_object, - new_space, - region->range(), region->object_start(), - region->prot()); - page_directory.map(*new_vmRegion); - new_region->vmRegion = new_vmRegion.get(); - regions_vec.push_back(new_vmRegion); + for(auto& region_pair : m_region_map) { + auto region = region_pair.second; + if(!region) { + // Reserved space + ASSERT(new_space->m_region_map.insert({region_pair.first, nullptr})); + } + + kstd::Arc new_object = region->object(); + // Mark as CoW / share if necessary + switch(region->object()->fork_action()) { + case VMObject::ForkAction::BecomeCoW: { + auto new_object_res = region->object()->clone(); + m_page_directory.map(*region); + if(new_object_res.is_error()) { + KLog::err("VMSpace", "Could not clone a VMObject: %d!", new_object_res.code()); break; } - case VMObject::ForkAction::Ignore: - break; + new_object = 
new_object_res.value(); + [[fallthrough]]; + } + case VMObject::ForkAction::Share: { + auto new_vmRegion = kstd::Arc::make( + new_object, + new_space, + region->range(), + region->object_start(), + region->prot()); + ASSERT(new_space->m_region_map.insert({region_pair.first, new_vmRegion.get()})); + page_directory.map(*new_vmRegion); + regions_vec.push_back(new_vmRegion); + break; } + case VMObject::ForkAction::Ignore: + break; } - - cur_region = cur_region->next; } return new_space; @@ -96,399 +77,216 @@ ResultRet> VMSpace::map_object(kstd::Arc object, V if(range.start % PAGE_SIZE != 0 || range.size % PAGE_SIZE != 0 || object_start % PAGE_SIZE != 0 || object_start + range.size > object->size()) return Result(EINVAL); - // Allocate the space region appropriately - VMSpaceRegion* region; - if(range.start) - region = TRY(alloc_space_at(range.size, range.start)); - else - region = TRY(alloc_space(range.size)); + LOCK(m_lock); + + // Find the appropriate range to use + if(range.start) { + // Make sure there's no region occupying that space already + if(m_region_map.find_node(range)) + return Result(ENOSPC); + } else { + // Map in the first available space + range.start = TRY(find_free_space(range.size)); + } // Create and map the region auto vmRegion = kstd::make_shared( object, self(), - VirtualRange {region->start, object->size()}, + range, object_start, prot); - region->vmRegion = vmRegion.get(); + auto new_node = m_region_map.insert({range, vmRegion.get()}); + ASSERT(new_node); m_page_directory.map(*vmRegion); + m_used += range.size; return vmRegion; } ResultRet> VMSpace::map_stack(kstd::Arc object, VMProt prot) { LOCK(m_lock); - // Find the endmost region with space in it - auto cur_region = m_region_map; - while(cur_region->next) - cur_region = cur_region->next; - while((cur_region->used || cur_region->size < object->size()) && cur_region) - cur_region = cur_region->prev; - if(!cur_region) - return Result(ENOMEM); - return map_object(object, prot, 
{cur_region->end() - object->size(), object->size()}); + // If there's room at the very end, put it there + if(!m_region_map.find_node(end() - object->size())) + return map_object(object, prot, {end() - object->size(), object->size()}); + + // Try mapping right before every object starting from the end until we find space + auto iter = m_region_map.end() - 1; + while(iter != m_region_map.end()) { + auto map_res = map_object(object, prot, {iter->first.start - object->size(), object->size()}); + if(!map_res.is_error()) + return map_res.value(); + iter--; + } + + // No room :( + return Result(ENOSPC); } Result VMSpace::unmap_region(VMRegion& region) { - m_lock.acquire(); - VMSpaceRegion* cur_region = m_region_map; - while(cur_region) { - if(cur_region->vmRegion == ®ion) { - if(cur_region->vmRegion) { - cur_region->vmRegion->m_space.reset(); - m_page_directory.unmap(*cur_region->vmRegion); - m_lock.release(); - auto free_res = free_region(cur_region); - ASSERT(!free_res.is_error()); - return free_res; - } - m_lock.release(); - return Result(ENOENT); - } - cur_region = cur_region->next; - } - m_lock.release(); - return Result(ENOENT); + return unmap_region(region.start()); } Result VMSpace::unmap_region(VirtualAddress address) { - m_lock.acquire(); - VMSpaceRegion* cur_region = m_region_map; - while(cur_region) { - if(cur_region->start == address) { - if(cur_region->vmRegion) { - cur_region->vmRegion->m_space.reset(); - m_page_directory.unmap(*cur_region->vmRegion); - m_lock.release(); - auto free_res = free_region(cur_region); - ASSERT(!free_res.is_error()); - return free_res; - } - m_lock.release(); - return Result(ENOENT); + LOCK(m_lock); + auto node = m_region_map.find_node(address); + if(node) { + auto region = node->data.second; + if(region) { + if(region->start() != address) + return Result(ENOENT); + region->m_space.reset(); + m_page_directory.unmap(*region); } - cur_region = cur_region->next; + m_region_map.erase(address); + m_used -= node->data.first.size; + 
return Result(SUCCESS); } - m_lock.release(); return Result(ENOENT); } ResultRet> VMSpace::get_region_at(VirtualAddress address) { LOCK(m_lock); - VMSpaceRegion* cur_region = m_region_map; - while(cur_region) { - if(cur_region->start == address) { - if(cur_region->vmRegion) - return cur_region->vmRegion->self(); + auto node = m_region_map.find_node(address); + if(node) { + auto region = node->data.second; + if(!region || node->data.first.start != address) return Result(ENOENT); - } - cur_region = cur_region->next; + return region->self(); } return Result(ENOENT); } ResultRet> VMSpace::get_region_containing(VirtualAddress address) { LOCK(m_lock); - VMSpaceRegion* cur_region = m_region_map; - while(cur_region) { - if(cur_region->contains(address)) { - if(cur_region->vmRegion) - return cur_region->vmRegion->self(); + auto node = m_region_map.find_node(address); + if(node) { + auto region = node->data.second; + if(!region) return Result(ENOENT); - } - cur_region = cur_region->next; + return region->self(); } return Result(ENOENT); } Result VMSpace::reserve_region(VirtualAddress start, size_t size) { LOCK(m_lock); - return alloc_space_at(size, start).result(); + if(!m_region_map.insert({{start, size}, nullptr})) + return Result(ENOSPC); + m_used += size; + return Result(SUCCESS); } Result VMSpace::try_pagefault(PageFault fault) { LOCK(m_lock); - auto cur_region = m_region_map; - while(cur_region) { - if(cur_region->contains(fault.address)) { - auto vmRegion = cur_region->vmRegion; - if(!vmRegion) - return Result(EINVAL); - - // First, sanity check. If the region doesn't have the proper permissions, we can just fail here. 
- auto prot = vmRegion->prot(); - if( - (!prot.read && fault.type == PageFault::Type::Read) || - (!prot.write && fault.type == PageFault::Type::Write) || - (!prot.execute && fault.type == PageFault::Type::Execute) - ) { - return Result(EINVAL); - } - - PageIndex error_page = (fault.address - vmRegion->start()) / PAGE_SIZE; - - // Check if the region is a mapped inode. - if(vmRegion->object()->is_inode()) { - auto inode_object = kstd::static_pointer_cast(vmRegion->object()); - - // Check to see if it needs to be read in - LOCK_N(inode_object->lock(), inode_locker); - if(inode_object->physical_page_index(error_page)) { - // This page may be marked CoW, so copy it if it is - if(vmRegion->prot().write && inode_object->page_is_cow(error_page)) { - auto res = vmRegion->m_object->try_cow_page(error_page); - if(res.is_error()) - return res; - } - - // Or, we may have encountered a race where the page was created by another thread after the fault. - m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE }); - return Result(SUCCESS); - } + auto node = m_region_map.find_node(fault.address); + if(!node) + return Result(ENOENT); + + auto vmRegion = node->data.second; + if(!vmRegion) + return Result(ENOENT); + + // First, sanity check. If the region doesn't have the proper permissions, we can just fail here. + auto prot = vmRegion->prot(); + if( + (!prot.read && fault.type == PageFault::Type::Read) || + (!prot.write && fault.type == PageFault::Type::Write) || + (!prot.execute && fault.type == PageFault::Type::Execute) + ) { + return Result(EINVAL); + } - // Allocate a new physical page. - auto new_page = TRY(MM.alloc_physical_page()); - - // We read directly from the shared VMObject if this page exists in it. 
- auto inode = inode_object->inode(); - auto shared_object = inode->shared_vm_object(); - PageIndex shared_page_index = error_page + (vmRegion->object_start() / PAGE_SIZE); - auto shared_page = shared_object->physical_page_index(shared_page_index); - if(shared_object != inode_object && shared_page) { - MM.copy_page(shared_page, new_page); - } else { - // Read the appropriate part of the file into the buffer. - kstd::Arc buf((uint8_t*) kmalloc(PAGE_SIZE)); - ssize_t nread = inode->read(error_page * PAGE_SIZE + vmRegion->object_start(), PAGE_SIZE, KernelPointer(buf.get()), nullptr); - if(nread < 0) - return Result(-nread); - - // Read the contents of the buffer into the newly allocated physical page. - MM.with_quickmapped(new_page, [&](void* page_buf) { - memcpy_uint32((uint32_t*) page_buf, (uint32_t*) buf.get(), PAGE_SIZE / sizeof(uint32_t)); - }); - } + PageIndex error_page = (fault.address - vmRegion->start()) / PAGE_SIZE; - // Remap the page. - inode_object->physical_page_index(error_page) = new_page; - m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE }); + // Check if the region is a mapped inode. + if(vmRegion->object()->is_inode()) { + auto inode_object = kstd::static_pointer_cast(vmRegion->object()); - return Result(SUCCESS); + // Check to see if it needs to be read in + LOCK_N(inode_object->lock(), inode_locker); + if(inode_object->physical_page_index(error_page)) { + // This page may be marked CoW, so copy it if it is + if(vmRegion->prot().write && inode_object->page_is_cow(error_page)) { + auto res = vmRegion->m_object->try_cow_page(error_page); + if(res.is_error()) + return res; } - // CoW if the region is writeable. 
- if(vmRegion->prot().write) { - auto result = vmRegion->m_object->try_cow_page(error_page); - if(result.is_success()) - m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE }); - return result; - } + // Or, we may have encountered a race where the page was created by another thread after the fault. + m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE }); + return Result(SUCCESS); + } - return Result(EINVAL); + // Allocate a new physical page. + auto new_page = TRY(MM.alloc_physical_page()); + + // We read directly from the shared VMObject if this page exists in it. + auto inode = inode_object->inode(); + auto shared_object = inode->shared_vm_object(); + PageIndex shared_page_index = error_page + (vmRegion->object_start() / PAGE_SIZE); + auto shared_page = shared_object->physical_page_index(shared_page_index); + if(shared_object != inode_object && shared_page) { + MM.copy_page(shared_page, new_page); + } else { + // Read the appropriate part of the file into the buffer. + kstd::Arc buf((uint8_t*) kmalloc(PAGE_SIZE)); + ssize_t nread = inode->read(error_page * PAGE_SIZE + vmRegion->object_start(), PAGE_SIZE, KernelPointer(buf.get()), nullptr); + if(nread < 0) + return Result(-nread); + + // Read the contents of the buffer into the newly allocated physical page. + MM.with_quickmapped(new_page, [&](void* page_buf) { + memcpy_uint32((uint32_t*) page_buf, (uint32_t*) buf.get(), PAGE_SIZE / sizeof(uint32_t)); + }); } - cur_region = cur_region->next; + + // Remap the page. + inode_object->physical_page_index(error_page) = new_page; + m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE }); + + return Result(SUCCESS); } - return Result(ENOENT); + // CoW if the region is writeable. 
+ if(vmRegion->prot().write) { + auto result = vmRegion->m_object->try_cow_page(error_page); + if(result.is_success()) + m_page_directory.map(*vmRegion, VirtualRange { error_page * PAGE_SIZE, PAGE_SIZE }); + return result; + } + + return Result(EINVAL); } ResultRet VMSpace::find_free_space(size_t size) { LOCK(m_lock); - auto cur_region = m_region_map; - while(cur_region) { - if(!cur_region->used && cur_region->size >= size) - return cur_region->start; - cur_region = cur_region->next; + VirtualRange range = { m_start, size }; + auto iter = m_region_map.begin(); + while(iter != m_region_map.end()) { + ASSERT(iter->first.start >= range.start); + if(iter->first.start - range.start >= size) + return range.start; + range.start = iter->first.end(); + iter++; } - return Result(ENOMEM); + if(range.end() > end()) + return Result(ENOSPC); + return range.start; } size_t VMSpace::calculate_regular_anonymous_total() { LOCK(m_lock); size_t total = 0; - auto cur_region = m_region_map; - while(cur_region) { - if(cur_region->used) { - auto& object = cur_region->vmRegion->m_object; - if(object->is_anonymous()) { - auto anon_object = kstd::static_pointer_cast(object); - if(!anon_object->is_shared()) - total += anon_object->size(); - } + for(auto& region_pair : m_region_map) { + auto region = region_pair.second; + if(!region) + continue; + if(region->object()->is_anonymous()) { + auto anon_object = kstd::static_pointer_cast(region->object()); + if(!anon_object->is_shared()) + total += anon_object->size(); } - cur_region = cur_region->next; } return total; } - -ResultRet VMSpace::alloc_space(size_t size) { - ASSERT(size % PAGE_SIZE == 0); - - /** - * We allocate a new region if we need one BEFORE iterating through the regions, because there's a chance we'll - * need to allocate more pages for the heap and if we're in the middle of iterating through regions when that - * happens, it could get ugly. 
- */ - auto new_region = new VMSpaceRegion; - - { - LOCK(m_lock); - auto cur_region = m_region_map; - while(cur_region) { - if(cur_region->used || cur_region->size < size) { - cur_region = cur_region->next; - continue; - } - - if(cur_region->size == size) { - cur_region->used = true; - m_used += cur_region->size; - delete new_region; - return cur_region; - } - - *new_region = VMSpaceRegion { - .start = cur_region->start, - .size = size, - .used = true, - .next = cur_region, - .prev = cur_region->prev - }; - - if(cur_region->prev) - cur_region->prev->next = new_region; - - cur_region->start += size; - cur_region->size -= size; - cur_region->prev = new_region; - m_used += new_region->size; - - if(m_region_map == cur_region) - m_region_map = new_region; - return new_region; - } - } - - delete new_region; - return Result(ENOMEM); -} - -ResultRet VMSpace::alloc_space_at(size_t size, VirtualAddress address) { - ASSERT(address % PAGE_SIZE == 0); - ASSERT(size % PAGE_SIZE == 0); - - /** - * We allocate new regions if we need one BEFORE iterating through the regions, because there's a chance we'll - * need to allocate more pages for the heap and if we're in the middle of iterating through regions when that - * happens, it could get ugly. 
- */ - auto new_region_before = new VMSpaceRegion; - auto new_region_after = new VMSpaceRegion; - - { - LOCK(m_lock); - auto cur_region = m_region_map; - while(cur_region) { - if(cur_region->contains(address)) { - if(cur_region->used) { - delete new_region_before; - delete new_region_after; - return Result(ENOMEM); - } - - if(cur_region->size == size) { - cur_region->used = true; - m_used += cur_region->size; - delete new_region_before; - delete new_region_after; - return cur_region; - } - - if(cur_region->size - (address - cur_region->start) >= size) { - // Create new region before if needed - if(cur_region->start < address) { - *new_region_before = VMSpaceRegion { - .start = cur_region->start, - .size = address - cur_region->start, - .used = false, - .next = cur_region, - .prev = cur_region->prev - }; - if(cur_region->prev) - cur_region->prev->next = new_region_before; - cur_region->prev = new_region_before; - if(m_region_map == cur_region) - m_region_map = new_region_before; - } else { - delete new_region_before; - } - - // Create new region after if needed - if(cur_region->end() > address + size) { - *new_region_after = VMSpaceRegion { - .start = address + size, - .size = cur_region->end() - (address + size), - .used = false, - .next = cur_region->next, - .prev = cur_region - }; - if(cur_region->next) - cur_region->next->prev = new_region_after; - cur_region->next = new_region_after; - } else { - delete new_region_after; - } - - cur_region->start = address; - cur_region->size = size; - cur_region->used = true; - m_used += cur_region->size; - return cur_region; - } - - return Result(ENOMEM); - } - - cur_region = cur_region->next; - } - } - - return Result(ENOMEM); -} - -Result VMSpace::free_region(VMSpaceRegion* region) { - VMSpaceRegion* to_delete[2] = {nullptr, nullptr}; - { - LOCK(m_lock); - region->used = false; - region->vmRegion = nullptr; - m_used -= region->size; - - // Merge previous region if needed - if(region->prev && !region->prev->used) { - 
-			to_delete[0] = region->prev;
-			region->prev = region->prev->prev;
-			if(to_delete[0]->prev)
-				to_delete[0]->prev->next = region;
-			region->start -= to_delete[0]->size;
-			region->size += to_delete[0]->size;
-			if(m_region_map == to_delete[0])
-				m_region_map = region;
-		}
-
-		// Merge next region if needed
-		if(region->next && !region->next->used) {
-			to_delete[1] = region->next;
-			region->next = region->next->next;
-			if(to_delete[1]->next)
-				to_delete[1]->next->prev = region;
-			region->size += to_delete[1]->size;
-		}
-	}
-
-	// We do this while not holding the lock just in case this triggers a page free in the allocator.
-	delete to_delete[0];
-	delete to_delete[1];
-
-	return Result(SUCCESS);
-}
diff --git a/kernel/memory/VMSpace.h b/kernel/memory/VMSpace.h
index 36756a8e..20f4c707 100644
--- a/kernel/memory/VMSpace.h
+++ b/kernel/memory/VMSpace.h
@@ -8,6 +8,7 @@
 #include "../Result.hpp"
 #include "../tasking/SpinLock.h"
 #include "PageDirectory.h"
+#include "../kstd/map.hpp"
 
 /**
  * This class represents a virtual memory address space and all of the regions it contains. It's used to allocate and
@@ -109,25 +110,9 @@ class VMSpace: public kstd::ArcSelf<VMSpace> {
 	SpinLock& lock() { return m_lock; }
 
 private:
-	struct VMSpaceRegion {
-		VirtualAddress start;
-		size_t size;
-		bool used;
-		VMSpaceRegion* next;
-		VMSpaceRegion* prev;
-		VMRegion* vmRegion;
-
-		size_t end() const { return start + size; }
-		bool contains(VirtualAddress address) const { return start <= address && end() > address; }
-	};
-
-	ResultRet<VMSpaceRegion*> alloc_space(size_t size);
-	ResultRet<VMSpaceRegion*> alloc_space_at(size_t size, VirtualAddress address);
-	Result free_region(VMSpaceRegion* region);
-
 	VirtualAddress m_start;
 	size_t m_size;
-	VMSpaceRegion* m_region_map;
+	kstd::map<ComparableVirtualRange, VMRegion*> m_region_map;
 	size_t m_used = 0;
 	SpinLock m_lock;
 	PageDirectory& m_page_directory;