PSISP 2025-07-27 08:41:54 +01:00 committed by GitHub
commit e8e0fc3463
16 changed files with 684 additions and 414 deletions


@ -337,6 +337,7 @@ set(KERNEL_SOURCE_FILES src/core/kernel/kernel.cpp src/core/kernel/resource_limi
src/core/kernel/address_arbiter.cpp src/core/kernel/error.cpp
src/core/kernel/file_operations.cpp src/core/kernel/directory_operations.cpp
src/core/kernel/idle_thread.cpp src/core/kernel/timers.cpp
src/core/kernel/fcram.cpp
)
set(SERVICE_SOURCE_FILES src/core/services/service_manager.cpp src/core/services/apt.cpp src/core/services/hid.cpp
src/core/services/fs.cpp src/core/services/gsp_gpu.cpp src/core/services/gsp_lcd.cpp
@ -416,7 +417,7 @@ set(HEADER_FILES include/emulator.hpp include/helpers.hpp include/termcolor.hpp
include/fs/archive_twl_sound.hpp include/fs/archive_card_spi.hpp include/services/ns.hpp include/audio/audio_device.hpp
include/audio/audio_device_interface.hpp include/audio/libretro_audio_device.hpp include/services/ir/ir_types.hpp
include/services/ir/ir_device.hpp include/services/ir/circlepad_pro.hpp include/services/service_intercept.hpp
include/screen_layout.hpp include/services/service_map.hpp include/audio/dsp_binary.hpp
include/screen_layout.hpp include/services/service_map.hpp include/audio/dsp_binary.hpp include/kernel/fcram.hpp
)
if(IOS)

47 include/kernel/fcram.hpp Normal file

@ -0,0 +1,47 @@
#pragma once
#include <list>
#include <memory>
#include "kernel_types.hpp"
class Memory;
using FcramBlockList = std::list<FcramBlock>;
class KFcram {
struct Region {
struct Block {
s32 pages;
s32 pageOffs;
bool used;
Block(s32 pages, u32 pageOffs) : pages(pages), pageOffs(pageOffs), used(false) {}
};
std::list<Block> blocks;
u32 start;
s32 pages;
s32 freePages;
public:
Region() : start(0), pages(0) {}
void reset(u32 start, size_t size);
void alloc(std::list<FcramBlock>& out, s32 pages, bool linear);
u32 getUsedCount();
u32 getFreeCount();
};
Memory& mem;
Region appRegion, sysRegion, baseRegion;
uint8_t* fcram;
std::unique_ptr<u32[]> refs;
public:
KFcram(Memory& memory);
void reset(size_t ramSize, size_t appSize, size_t sysSize, size_t baseSize);
void alloc(FcramBlockList& out, s32 pages, FcramRegion region, bool linear);
void incRef(FcramBlockList& list);
void decRef(FcramBlockList& list);
u32 getUsedCount(FcramRegion region);
};
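A minimal usage sketch of the new allocator (names are taken from this header, the reset() arguments mirror the FCRAM constants in memory.hpp, and the printf is purely illustrative). Note that alloc() fills a list of physical blocks instead of returning a single paddr, since a non-linear request may be satisfied by several disjoint ranges:
KFcram fcramManager(mem);  // assumes an existing Memory instance `mem`
fcramManager.reset(Memory::FCRAM_SIZE, Memory::FCRAM_APPLICATION_SIZE, Memory::FCRAM_SYSTEM_SIZE, Memory::FCRAM_BASE_SIZE);
FcramBlockList blocks;
fcramManager.alloc(blocks, 16, FcramRegion::App, false);  // 16 pages (64 KB), possibly split across blocks
for (const FcramBlock& block : blocks) {
    printf("paddr=%08X pages=%d\n", block.paddr, block.pages);
}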


@ -7,6 +7,7 @@
#include <vector>
#include "config.hpp"
#include "fcram.hpp"
#include "helpers.hpp"
#include "kernel_types.hpp"
#include "logger.hpp"
@ -25,6 +26,8 @@ class Kernel {
CPU& cpu;
Memory& mem;
KFcram fcramManager;
// The handle number for the next kernel object to be created
u32 handleCounter;
// A list of our OS threads, the max number of which depends on the resource limit (hardcoded 32 per process on retail it seems).
@ -247,6 +250,7 @@ class Kernel {
}
ServiceManager& getServiceManager() { return serviceManager; }
KFcram& getFcramManager() { return fcramManager; }
Scheduler& getScheduler();
void sendGPUInterrupt(GPUInterrupt type) { serviceManager.sendGPUInterrupt(type); }


@ -44,6 +44,12 @@ enum class ProcessorID : s32 {
New3DSExtra2 = 3
};
enum class FcramRegion {
App = 0x100,
Sys = 0x200,
Base = 0x300
};
struct AddressArbiter {};
struct ResourceLimits {
@ -250,4 +256,11 @@ struct KernelObject {
Helpers::panic("Called GetWaitList on kernel object without a waitlist (Type: %s)", getTypeName());
}
}
};
struct FcramBlock {
u32 paddr;
s32 pages;
FcramBlock(u32 paddr, s32 pages) : paddr(paddr), pages(pages) {}
};
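The numeric values (0x100, 0x200, 0x300) line up with the region bits of the svcControlMemory operation word as documented on 3DBrew, so in principle the target region can be masked straight out of the operation value. The helper below is hypothetical and not part of this diff:
// Hypothetical helper: bits 8-9 of the ControlMemory operation word select the FCRAM region
inline FcramRegion regionFromControlMemoryOp(u32 operation) {
    return static_cast<FcramRegion>(operation & 0x300);  // App = 0x100, Sys = 0x200, Base = 0x300
}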


@ -3,6 +3,7 @@
#include <bitset>
#include <filesystem>
#include <fstream>
#include <list>
#include <optional>
#include <vector>
@ -10,8 +11,10 @@
#include "crypto/aes_engine.hpp"
#include "handles.hpp"
#include "helpers.hpp"
#include "loader/ncsd.hpp"
#include "kernel/kernel_types.hpp"
#include "loader/3dsx.hpp"
#include "loader/ncsd.hpp"
#include "result/result.hpp"
#include "services/region_codes.hpp"
namespace PhysicalAddrs {
@ -38,15 +41,15 @@ namespace VirtualAddrs {
DefaultStackSize = 0x4000,
NormalHeapStart = 0x08000000,
LinearHeapStartOld = 0x14000000, // If kernel version < 0x22C
LinearHeapStartOld = 0x14000000, // If kernel version < 0x22C
LinearHeapEndOld = 0x1C000000,
LinearHeapStartNew = 0x30000000,
LinearHeapEndNew = 0x40000000,
// Start of TLS for first thread. Next thread's storage will be at TLSBase + 0x1000, and so on
TLSBase = 0xFF400000,
TLSSize = 0x1000,
// Start of TLS for first thread. Next thread's storage will be at TLSBase + 0x200, and so on
TLSBase = 0x1FF82000,
TLSSize = 0x200,
VramStart = 0x1F000000,
VramSize = 0x00600000,
@ -76,63 +79,83 @@ namespace KernelMemoryTypes {
PERMISSION_W = 1 << 1,
PERMISSION_X = 1 << 2
};
// I assume this is referring to a single piece of allocated memory? If it's for pages, it makes no sense.
// If it's for multiple allocations, it also makes no sense
struct MemoryInfo {
u32 baseAddr; // Base process virtual address. Used as a paddr in lockedMemoryInfo instead
u32 size; // Of what?
u32 perms; // Is this referring to a single page or?
u32 baseAddr;
u32 pages;
u32 perms;
u32 state;
u32 end() { return baseAddr + size; }
MemoryInfo(u32 baseAddr, u32 size, u32 perms, u32 state) : baseAddr(baseAddr), size(size)
, perms(perms), state(state) {}
u32 end() { return baseAddr + (pages << 12); }
MemoryInfo() : baseAddr(0), pages(0), perms(0), state(0) {}
MemoryInfo(u32 baseAddr, u32 pages, u32 perms, u32 state) : baseAddr(baseAddr), pages(pages), perms(perms), state(state) {}
};
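// Note (illustrative, not part of this diff): MemoryInfo now tracks pages rather than bytes, so sizes
// are derived with 4 KB shifts. For example, MemoryInfo(0x08000000, 0x10, PERMISSION_R | PERMISSION_W,
// MemoryState::Private) describes 0x10 pages = 0x10000 bytes (64 KB), and end() returns
// 0x08000000 + (0x10 << 12) = 0x08010000.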
// Shared memory block for HID, GSP:GPU etc
struct SharedMemoryBlock {
u32 paddr; // Physical address of this block's memory
u32 size; // Size of block
u32 handle; // The handle of the shared memory block
bool mapped; // Has this block been mapped at least once?
u32 paddr; // Physical address of this block's memory
u32 size; // Size of block
u32 handle; // The handle of the shared memory block
bool mapped; // Has this block been mapped at least once?
SharedMemoryBlock(u32 paddr, u32 size, u32 handle) : paddr(paddr), size(size), handle(handle), mapped(false) {}
};
}
} // namespace KernelMemoryTypes
struct FcramBlock;
class KFcram;
enum class FcramRegion;
class Memory {
// Used internally by changeMemoryState
struct Operation {
KernelMemoryTypes::MemoryState newState = KernelMemoryTypes::MemoryState::Free;
bool r = false, w = false, x = false;
bool changeState = false;
bool changePerms = false;
};
using Handle = HorizonHandle;
u8* fcram;
u8* dspRam; // Provided to us by Audio
u8* vram; // Provided to the memory class by the GPU class
u64& cpuTicks; // Reference to the CPU tick counter
u64& cpuTicks; // Reference to the CPU tick counter
using SharedMemoryBlock = KernelMemoryTypes::SharedMemoryBlock;
// TODO: remove this reference when Peach's excellent page table code is moved to a better home
KFcram& fcramManager;
// Our dynarmic core uses page tables for reads and writes with 4096 byte pages
std::vector<uintptr_t> readTable, writeTable;
// vaddr->paddr translation table
std::vector<u32> paddrTable;
// This tracks our OS' memory allocations
std::vector<KernelMemoryTypes::MemoryInfo> memoryInfo;
std::list<KernelMemoryTypes::MemoryInfo> memoryInfo;
std::array<SharedMemoryBlock, 5> sharedMemBlocks = {
SharedMemoryBlock(0, 0, KernelHandles::FontSharedMemHandle), // Shared memory for the system font (size is 0 because we read the size from the cmrc filesystem
SharedMemoryBlock(0, 0x1000, KernelHandles::GSPSharedMemHandle), // GSP shared memory
SharedMemoryBlock(0, 0x1000, KernelHandles::HIDSharedMemHandle), // HID shared memory
SharedMemoryBlock(0, 0x3000, KernelHandles::CSNDSharedMemHandle), // CSND shared memory
SharedMemoryBlock(
0, 0, KernelHandles::FontSharedMemHandle
), // Shared memory for the system font (size is 0 because we read the size from the cmrc filesystem)
SharedMemoryBlock(0, 0x1000, KernelHandles::GSPSharedMemHandle), // GSP shared memory
SharedMemoryBlock(0, 0x1000, KernelHandles::HIDSharedMemHandle), // HID shared memory
SharedMemoryBlock(0, 0x3000, KernelHandles::CSNDSharedMemHandle), // CSND shared memory
SharedMemoryBlock(0, 0xE7000, KernelHandles::APTCaptureSharedMemHandle), // APT Capture Buffer memory
};
};
public:
public:
static constexpr u32 pageShift = 12;
static constexpr u32 pageSize = 1 << pageShift;
static constexpr u32 pageMask = pageSize - 1;
static constexpr u32 totalPageCount = 1 << (32 - pageShift);
static constexpr u32 FCRAM_SIZE = u32(128_MB);
static constexpr u32 FCRAM_APPLICATION_SIZE = u32(80_MB);
static constexpr u32 FCRAM_APPLICATION_SIZE = u32(64_MB + 16_MB);
static constexpr u32 FCRAM_SYSTEM_SIZE = u32(44_MB - 16_MB);
static constexpr u32 FCRAM_BASE_SIZE = u32(20_MB);
static constexpr u32 FCRAM_PAGE_COUNT = FCRAM_SIZE / pageSize;
static constexpr u32 FCRAM_APPLICATION_PAGE_COUNT = FCRAM_APPLICATION_SIZE / pageSize;
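The three region sizes partition FCRAM exactly (80 MB + 28 MB + 20 MB = 128 MB), which is what lets KFcram::reset() lay the regions out back to back. A compile-time check along these lines would document the invariant (purely illustrative, not part of the diff):
static_assert(
    Memory::FCRAM_APPLICATION_SIZE + Memory::FCRAM_SYSTEM_SIZE + Memory::FCRAM_BASE_SIZE == Memory::FCRAM_SIZE,
    "FCRAM regions must cover all of FCRAM exactly"
);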
@ -140,18 +163,18 @@ public:
static constexpr u32 DSP_CODE_MEMORY_OFFSET = u32(0_KB);
static constexpr u32 DSP_DATA_MEMORY_OFFSET = u32(256_KB);
private:
std::bitset<FCRAM_PAGE_COUNT> usedFCRAMPages;
std::optional<u32> findPaddr(u32 size);
private:
// std::bitset<FCRAM_PAGE_COUNT> usedFCRAMPages;
// std::optional<u32> findPaddr(u32 size);
u64 timeSince3DSEpoch();
// https://www.3dbrew.org/wiki/Configuration_Memory#ENVINFO
// Report a retail unit without JTAG
static constexpr u32 envInfo = 1;
// Stored in Configuration Memory starting @ 0x1FF80060
// Stored in Configuration Memory starting @ 0x1FF80060
struct FirmwareInfo {
u8 unk; // Usually 0 according to 3DBrew
u8 unk; // Usually 0 according to 3DBrew
u8 revision;
u8 minor;
u8 major;
@ -167,12 +190,15 @@ private:
static constexpr std::array<u8, 6> MACAddress = {0x40, 0xF4, 0x07, 0xFF, 0xFF, 0xEE};
void changeMemoryState(u32 vaddr, s32 pages, const Operation& op);
void queryPhysicalBlocks(std::list<FcramBlock>& outList, u32 vaddr, s32 pages);
void mapPhysicalMemory(u32 vaddr, u32 paddr, s32 pages, bool r, bool w, bool x);
void unmapPhysicalMemory(u32 vaddr, u32 paddr, s32 pages);
public:
u16 kernelVersion = 0;
u32 usedUserMemory = u32(0_MB); // How much of the APPLICATION FCRAM range is used (allocated to the appcore)
u32 usedSystemMemory = u32(0_MB); // Similar for the SYSTEM range (reserved for the syscore)
Memory(u64& cpuTicks, const EmulatorConfig& config);
Memory(KFcram& fcramManager, u64& cpuTicks, const EmulatorConfig& config);
void reset();
void* getReadPointer(u32 address);
void* getWritePointer(u32 address);
@ -198,22 +224,6 @@ private:
u32 getLinearHeapVaddr();
u8* getFCRAM() { return fcram; }
// Total amount of OS-only FCRAM available (Can vary depending on how much FCRAM the app requests via the cart exheader)
u32 totalSysFCRAM() {
return FCRAM_SIZE - FCRAM_APPLICATION_SIZE;
}
// Amount of OS-only FCRAM currently available
u32 remainingSysFCRAM() {
return totalSysFCRAM() - usedSystemMemory;
}
// Physical FCRAM index to the start of OS FCRAM
// We allocate the first part of physical FCRAM for the application, and the rest to the OS. So the index for the OS = application ram size
u32 sysFCRAMIndex() {
return FCRAM_APPLICATION_SIZE;
}
enum class BatteryLevel {
Empty = 0,
AlmostEmpty,
@ -224,9 +234,9 @@ private:
};
u8 getBatteryState(bool adapterConnected, bool charging, BatteryLevel batteryLevel) {
u8 value = static_cast<u8>(batteryLevel) << 2; // Bits 2:4 are the battery level from 0 to 5
if (adapterConnected) value |= 1 << 0; // Bit 0 shows if the charger is connected
if (charging) value |= 1 << 1; // Bit 1 shows if we're charging
u8 value = static_cast<u8>(batteryLevel) << 2; // Bits 2:4 are the battery level from 0 to 5
if (adapterConnected) value |= 1 << 0; // Bit 0 shows if the charger is connected
if (charging) value |= 1 << 1; // Bit 1 shows if we're charging
return value;
}
@ -248,27 +258,19 @@ private:
}
// Returns whether "addr" is aligned to a page (4096 byte) boundary
static constexpr bool isAligned(u32 addr) {
return (addr & pageMask) == 0;
}
static constexpr bool isAligned(u32 addr) { return (addr & pageMask) == 0; }
// Allocate "size" bytes of RAM starting from FCRAM index "paddr" (We pick it ourself if paddr == 0)
// And map them to virtual address "vaddr" (We also pick it ourself if vaddr == 0).
// If the "linear" flag is on, the paddr pages must be adjacent in FCRAM
// This function is for interacting with the *user* portion of FCRAM mainly. For OS RAM, we use other internal functions below
// r, w, x: Permissions for the allocated memory
// adjustAddrs: If it's true paddr == 0 or vaddr == 0 tell the allocator to pick its own addresses. Used for eg svc ControlMemory
// isMap: Shows whether this is a reserve operation, that allocates memory and maps it to the addr space, or if it's a map operation,
// which just maps memory from paddr to vaddr without hassle. The latter is useful for shared memory mapping, the "map" ControlMemory, op, etc
// Returns the vaddr the FCRAM was mapped to or nullopt if allocation failed
std::optional<u32> allocateMemory(u32 vaddr, u32 paddr, u32 size, bool linear, bool r = true, bool w = true, bool x = true,
bool adjustsAddrs = false, bool isMap = false);
KernelMemoryTypes::MemoryInfo queryMemory(u32 vaddr);
bool allocMemory(u32 vaddr, s32 pages, FcramRegion region, bool r, bool w, bool x, KernelMemoryTypes::MemoryState state);
bool allocMemoryLinear(u32& outVaddr, u32 inVaddr, s32 pages, FcramRegion region, bool r, bool w, bool x);
bool mapVirtualMemory(
u32 dstVaddr, u32 srcVaddr, s32 pages, bool r, bool w, bool x, KernelMemoryTypes::MemoryState oldDstState,
KernelMemoryTypes::MemoryState oldSrcState, KernelMemoryTypes::MemoryState newDstState, KernelMemoryTypes::MemoryState newSrcState
);
void changePermissions(u32 vaddr, s32 pages, bool r, bool w, bool x);
Result::HorizonResult queryMemory(KernelMemoryTypes::MemoryInfo& out, u32 vaddr);
Result::HorizonResult testMemoryState(u32 vaddr, s32 pages, KernelMemoryTypes::MemoryState desiredState);
// For internal use
// Allocates a "size"-sized chunk of system FCRAM and returns the index of physical FCRAM used for the allocation
// Used for allocating things like shared memory and the like
u32 allocateSysMemory(u32 size);
void copyToVaddr(u32 dstVaddr, const u8* srcHost, s32 size);
// Map a shared memory block to virtual address vaddr with permissions "myPerms"
// The kernel has a second permission parameter in MapMemoryBlock but not sure what's used for
@ -276,10 +278,6 @@ private:
// Returns a pointer to the FCRAM block used for the memory if allocation succeeded
u8* mapSharedMemory(Handle handle, u32 vaddr, u32 myPerms, u32 otherPerms);
// Mirrors the page mapping for "size" bytes starting from sourceAddress, to "size" bytes in destAddress
// All of the above must be page-aligned.
void mirrorMapping(u32 destAddress, u32 sourceAddress, u32 size);
// Backup of the game's CXI partition info, if any
std::optional<NCCH> loadedCXI = std::nullopt;
std::optional<HB3DSX> loaded3DSX = std::nullopt;
@ -291,7 +289,6 @@ private:
u8* getDSPMem() { return dspRam; }
u8* getDSPDataMem() { return &dspRam[DSP_DATA_MEMORY_OFFSET]; }
u8* getDSPCodeMem() { return &dspRam[DSP_CODE_MEMORY_OFFSET]; }
u32 getUsedUserMem() { return usedUserMemory; }
void setVRAM(u8* pointer) { vram = pointer; }
void setDSPMem(u8* pointer) { dspRam = pointer; }

103 src/core/kernel/fcram.cpp Normal file

@ -0,0 +1,103 @@
#include "fcram.hpp"
#include "memory.hpp"
void KFcram::Region::reset(u32 start, size_t size) {
this->start = start;
pages = size >> 12;
freePages = pages;
Block initialBlock(pages, 0);
blocks.clear();
blocks.push_back(initialBlock);
}
void KFcram::Region::alloc(std::list<FcramBlock>& out, s32 allocPages, bool linear) {
for (auto it = blocks.begin(); it != blocks.end(); it++) {
if (it->used) continue;
// On linear allocations, only a single contiguous block may be used
if (it->pages < allocPages && linear) continue;
// If the current block is bigger than the allocation, split it
if (it->pages > allocPages) {
Block newBlock(it->pages - allocPages, it->pageOffs + allocPages);
it->pages = allocPages;
blocks.insert(it, newBlock);
}
// Mark the block as allocated and add it to the output list
it->used = true;
allocPages -= it->pages;
freePages -= it->pages;
u32 paddr = start + (it->pageOffs << 12);
FcramBlock outBlock(paddr, it->pages);
out.push_back(outBlock);
if (allocPages < 1) {
return;
}
}
// Official kernel panics here
Helpers::panic("Failed to allocate FCRAM, not enough guest memory");
}
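// Illustrative trace of the first-fit splitting above (hand-derived, not from a test run), for a
// hypothetical 8-page region:
//   after reset:           blocks = [ {8 pages, offs 0, free} ]
//   alloc(3, non-linear):  blocks = [ {5, offs 3, free}, {3, offs 0, used} ]                    -> paddr = start
//   alloc(3, non-linear):  blocks = [ {2, offs 6, free}, {3, offs 3, used}, {3, offs 0, used} ] -> paddr = start + 0x3000
//   a further alloc(3) finds no free block big enough and hits the panic above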
u32 KFcram::Region::getUsedCount() { return pages - freePages; }
u32 KFcram::Region::getFreeCount() { return freePages; }
KFcram::KFcram(Memory& mem) : mem(mem) {}
void KFcram::reset(size_t ramSize, size_t appSize, size_t sysSize, size_t baseSize) {
fcram = mem.getFCRAM();
refs = std::unique_ptr<u32[]>(new u32[ramSize >> 12]);
std::memset(refs.get(), 0, (ramSize >> 12) * sizeof(u32));
appRegion.reset(0, appSize);
sysRegion.reset(appSize, sysSize);
baseRegion.reset(appSize + sysSize, baseSize);
}
void KFcram::alloc(FcramBlockList& out, s32 pages, FcramRegion region, bool linear) {
switch (region) {
case FcramRegion::App: appRegion.alloc(out, pages, linear); break;
case FcramRegion::Sys: sysRegion.alloc(out, pages, linear); break;
case FcramRegion::Base: baseRegion.alloc(out, pages, linear); break;
default: Helpers::panic("Invalid FCRAM region chosen for allocation!"); break;
}
incRef(out);
}
void KFcram::incRef(FcramBlockList& list) {
for (auto it = list.begin(); it != list.end(); it++) {
for (int i = 0; i < it->pages; i++) {
u32 index = (it->paddr >> 12) + i;
refs.get()[index]++;
}
}
}
void KFcram::decRef(FcramBlockList& list) {
for (auto it = list.begin(); it != list.end(); it++) {
for (int i = 0; i < it->pages; i++) {
u32 index = (it->paddr >> 12) + i;
refs.get()[index]--;
if (!refs.get()[index]) {
Helpers::panic("TODO: Freeing FCRAM");
}
}
}
}
u32 KFcram::getUsedCount(FcramRegion region) {
switch (region) {
case FcramRegion::App: return appRegion.getUsedCount();
case FcramRegion::Sys: return sysRegion.getUsedCount();
case FcramRegion::Base: return baseRegion.getUsedCount();
default: Helpers::panic("Invalid FCRAM region in getUsedCount!");
}
}
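The per-page reference counts track how many mappings point at each physical page, so a page shared between several virtual mappings is only reclaimed once the last mapping goes away; actual freeing is still unimplemented (decRef panics when a count reaches zero). A minimal sketch of the intended lifecycle, assuming an existing KFcram instance named fcramManager:
FcramBlockList blocks;
fcramManager.alloc(blocks, 4, FcramRegion::Sys, false);  // alloc() already bumps each page's count via incRef()
fcramManager.incRef(blocks);                             // e.g. a second mapping of the same physical pages
fcramManager.decRef(blocks);                             // back to one mapping, nothing is freed
fcramManager.decRef(blocks);                             // count reaches 0 -> currently panics with "TODO: Freeing FCRAM"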


@ -17,6 +17,8 @@ idle_thread_main:
b idle_thread_main
*/
using namespace KernelMemoryTypes;
static constexpr u8 idleThreadCode[] = {
0x00, 0x00, 0xA0, 0xE3, // mov r0, #0
0x00, 0x10, 0xA0, 0xE3, // mov r1, #0
@ -27,18 +29,16 @@ static constexpr u8 idleThreadCode[] = {
// Set up an idle thread to run when no thread is able to run
void Kernel::setupIdleThread() {
Thread& t = threads[idleThreadIndex];
constexpr u32 codeAddress = 0xBFC00000;
// Reserve some memory for the idle thread's code. We map this memory to vaddr BFC00000 which is not userland-accessible
// Reserve some memory for the idle thread's code. We map this memory to vaddr 3FC00000 which shouldn't be accessed by applications
// We only allocate 4KB (1 page) because our idle code is pretty small
const u32 fcramIndex = mem.allocateSysMemory(Memory::pageSize);
auto vaddr = mem.allocateMemory(codeAddress, fcramIndex, Memory::pageSize, true, true, false, true, false, true);
if (!vaddr.has_value() || vaddr.value() != codeAddress) {
constexpr u32 codeAddress = 0x3FC00000;
if (!mem.allocMemory(codeAddress, 1, FcramRegion::Base, true, true, false, MemoryState::Locked)) {
Helpers::panic("Failed to setup idle thread");
}
// Copy idle thread code to the allocated FCRAM
std::memcpy(&mem.getFCRAM()[fcramIndex], idleThreadCode, sizeof(idleThreadCode));
mem.copyToVaddr(codeAddress, idleThreadCode, sizeof(idleThreadCode));
t.entrypoint = codeAddress;
t.initialSP = 0;


@ -6,7 +6,7 @@
#include "kernel_types.hpp"
Kernel::Kernel(CPU& cpu, Memory& mem, GPU& gpu, const EmulatorConfig& config, LuaManager& lua)
: cpu(cpu), regs(cpu.regs()), mem(mem), handleCounter(0), serviceManager(regs, mem, gpu, currentProcess, *this, config, lua) {
: cpu(cpu), regs(cpu.regs()), mem(mem), handleCounter(0), serviceManager(regs, mem, gpu, currentProcess, *this, config, lua), fcramManager(mem) {
objects.reserve(512); // Make room for a few objects to avoid further memory allocs later
mutexHandles.reserve(8);
portHandles.reserve(32);
@ -180,9 +180,7 @@ void Kernel::reset() {
}
// Get pointer to thread-local storage
u32 Kernel::getTLSPointer() {
return VirtualAddrs::TLSBase + currentThreadIndex * VirtualAddrs::TLSSize;
}
u32 Kernel::getTLSPointer() { return VirtualAddrs::TLSBase + currentThreadIndex * VirtualAddrs::TLSSize; }
// Result CloseHandle(Handle handle)
void Kernel::svcCloseHandle() {
@ -271,7 +269,8 @@ void Kernel::getProcessInfo() {
// According to 3DBrew: Amount of private (code, data, heap) memory used by the process + total supervisor-mode
// stack size + page-rounded size of the external handle table
case 2:
regs[1] = mem.getUsedUserMem();
// FIXME
regs[1] = fcramManager.getUsedCount(FcramRegion::App);
regs[2] = 0;
break;
@ -364,7 +363,7 @@ void Kernel::getSystemInfo() {
switch (subtype) {
// Total used memory size in the APPLICATION memory region
case 1:
regs[1] = mem.getUsedUserMem();
regs[1] = fcramManager.getUsedCount(FcramRegion::App);
regs[2] = 0;
break;


@ -30,10 +30,10 @@ namespace MemoryPermissions {
};
}
using namespace KernelMemoryTypes;
// Returns whether "value" is aligned to a page boundary (Ie a boundary of 4096 bytes)
static constexpr bool isAligned(u32 value) {
return (value & 0xFFF) == 0;
}
static constexpr bool isAligned(u32 value) { return (value & 0xFFF) == 0; }
// Result ControlMemory(u32* outaddr, u32 addr0, u32 addr1, u32 size,
// MemoryOperation operation, MemoryPermission permissions)
@ -44,6 +44,7 @@ void Kernel::controlMemory() {
u32 addr0 = regs[1];
u32 addr1 = regs[2];
u32 size = regs[3];
u32 pages = size >> 12; // Official kernel truncates nonaligned sizes
u32 perms = regs[4];
if (perms == MemoryPermissions::DontCare) {
@ -61,7 +62,7 @@ void Kernel::controlMemory() {
Helpers::panic("ControlMemory: attempted to allocate executable memory");
}
if (!isAligned(addr0) || !isAligned(addr1) || !isAligned(size)) {
if (!isAligned(addr0) || !isAligned(addr1)) {
Helpers::panic("ControlMemory: Unaligned parameters\nAddr0: %08X\nAddr1: %08X\nSize: %08X", addr0, addr1, size);
}
@ -72,22 +73,51 @@ void Kernel::controlMemory() {
switch (operation & 0xFF) {
case Operation::Commit: {
std::optional<u32> address = mem.allocateMemory(addr0, 0, size, linear, r, w, x, true);
if (!address.has_value()) {
Helpers::panic("ControlMemory: Failed to allocate memory");
// TODO: base this on the exheader
auto region = FcramRegion::App;
u32 outAddr = 0;
if (linear) {
if (!mem.allocMemoryLinear(outAddr, addr0, pages, region, r, w, false)) {
Helpers::panic("ControlMemory: Failed to allocate linear memory");
}
} else {
if (!mem.allocMemory(addr0, pages, region, r, w, false, MemoryState::Private)) {
Helpers::panic("ControlMemory: Failed to allocate memory");
}
outAddr = addr0;
}
regs[1] = address.value();
regs[1] = outAddr;
break;
}
case Operation::Map: mem.mirrorMapping(addr0, addr1, size); break;
case Operation::Map:
// Official kernel only allows Private regions to be mapped to Free regions. An Alias or Aliased region cannot be mapped again
if (!mem.mapVirtualMemory(
addr0, addr1, pages, r, w, false, MemoryState::Free, MemoryState::Private, MemoryState::Alias, MemoryState::Aliased
))
Helpers::panic("ControlMemory: Failed to map memory");
break;
case Operation::Unmap:
// The same as a Map operation, except in reverse
if (!mem.mapVirtualMemory(
addr0, addr1, pages, false, false, false, MemoryState::Alias, MemoryState::Aliased, MemoryState::Free, MemoryState::Private
))
Helpers::panic("ControlMemory: Failed to unmap memory");
break;
case Operation::Protect:
Helpers::warn(
"Ignoring mprotect! Hope nothing goes wrong but if the game accesses invalid memory or crashes then we prolly need to implement "
"this\n"
);
// Official kernel has an internal state bit to indicate that the region's permissions may be changed
// But this should account for all cases
if (!mem.testMemoryState(addr0, pages, MemoryState::Private) && !mem.testMemoryState(addr0, pages, MemoryState::Alias) &&
!mem.testMemoryState(addr0, pages, MemoryState::Aliased) && !mem.testMemoryState(addr0, pages, MemoryState::AliasCode))
Helpers::panic("Tried to mprotect invalid region!");
mem.changePermissions(addr0, pages, r, w, false);
regs[1] = addr0;
break;
default: Helpers::warn("ControlMemory: unknown operation %X\n", operation); break;
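For context, this is roughly what the Commit path above services from the guest's point of view. The names below are libctru's, not this emulator's, so treat the snippet as an assumption-laden sketch:
// Allocate 1 MB of linear (physically contiguous) RW memory, e.g. for a GPU-visible buffer
u32 addr = 0;
Result rc = svcControlMemory(&addr, 0, 0, 0x100000, MEMOP_ALLOC_LINEAR, MEMPERM_READWRITE);
// On success the emulator ends up in allocMemoryLinear() and returns the linear-heap vaddr in addr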
@ -104,10 +134,11 @@ void Kernel::queryMemory() {
logSVC("QueryMemory(mem info pointer = %08X, page info pointer = %08X, addr = %08X)\n", memInfo, pageInfo, addr);
const auto info = mem.queryMemory(addr);
regs[0] = Result::Success;
KernelMemoryTypes::MemoryInfo info;
const auto result = mem.queryMemory(info, addr);
regs[0] = result;
regs[1] = info.baseAddr;
regs[2] = info.size;
regs[2] = info.pages << 12;
regs[3] = info.perms;
regs[4] = info.state;
regs[5] = 0; // page flags


@ -82,7 +82,7 @@ void Kernel::getResourceLimitCurrentValues() {
s32 Kernel::getCurrentResourceValue(const KernelObject* limit, u32 resourceName) {
const auto data = static_cast<ResourceLimits*>(limit->data);
switch (resourceName) {
case ResourceType::Commit: return mem.usedUserMemory;
case ResourceType::Commit: return fcramManager.getUsedCount(FcramRegion::App) << 12; // TODO: needs to use the current amount of memory allocated by the process
case ResourceType::Thread: return threadIndices.size();
default: Helpers::panic("Attempted to get current value of unknown kernel resource: %d\n", resourceName);
}


@ -26,7 +26,7 @@ namespace {
} // namespace
bool Memory::map3DSX(HB3DSX& hb3dsx, const HB3DSX::Header& header) {
const LoadInfo hbInfo = {
/* const LoadInfo hbInfo = {
.codeSegSizeAligned = (header.codeSegSize + 0xFFF) & ~0xFFF,
.rodataSegSizeAligned = (header.rodataSegSize + 0xFFF) & ~0xFFF,
.dataSegSizeAligned = (header.dataSegSize + 0xFFF) & ~0xFFF,
@ -186,10 +186,10 @@ bool Memory::map3DSX(HB3DSX& hb3dsx, const HB3DSX::Header& header) {
return false;
}
}
}
}*/
// Detect and fill _prm structure
HB3DSX::PrmStruct pst;
/*HB3DSX::PrmStruct pst;
std::memcpy(&pst, &code[4], sizeof(pst));
if (pst.magic[0] == '_' && pst.magic[1] == 'p' && pst.magic[2] == 'r' && pst.magic[3] == 'm') {
// if there was any argv to put, it would go there
@ -205,7 +205,7 @@ bool Memory::map3DSX(HB3DSX& hb3dsx, const HB3DSX::Header& header) {
// RUNFLAG_APTREINIT: Reinitialize APT.
// From libctru. Because there's no previously running software here
pst.runFlags |= 1 << 1;
pst.runFlags |= 1 << 1;*/
/* s64 dummy;
bool isN3DS = svcGetSystemInfo(&dummy, 0x10001, 0) == 0;
@ -213,7 +213,7 @@ bool Memory::map3DSX(HB3DSX& hb3dsx, const HB3DSX::Header& header) {
{
pst->heapSize = u32(48_MB);
pst->linearHeapSize = u32(64_MB);
} else */ {
} else *//* {
pst.heapSize = u32(24_MB);
pst.linearHeapSize = u32(32_MB);
}
@ -228,7 +228,8 @@ bool Memory::map3DSX(HB3DSX& hb3dsx, const HB3DSX::Header& header) {
allocateMemory(rodataSegAddr, paddr + rodataOffset, hbInfo.rodataSegSizeAligned, true, true, false, false); // Rodata is R--
allocateMemory(dataSegAddr, paddr + dataOffset, hbInfo.dataSegSizeAligned + 0x1000, true, true, true, false); // Data+BSS+Extra is RW-
return true;
return true;*/
return false;
}
std::optional<u32> Memory::load3DSX(const std::filesystem::path& path) {


@ -6,64 +6,61 @@
using namespace ELFIO;
std::optional<u32> Memory::loadELF(std::ifstream& file) {
loadedCXI = std::nullopt; // ELF files don't have a CXI, so set this to null
/* loadedCXI = std::nullopt; // ELF files don't have a CXI, so set this to null
elfio reader;
if (!file.good() || !reader.load(file)) {
printf("Woops failed to load ELF\n");
return std::nullopt;
}
// Allocate stack space. For ELFs we use the default stack size, which is 16KB
if (!allocateMainThreadStack(VirtualAddrs::DefaultStackSize)) {
// Should be unreachable
printf("Failed to allocate stack space for ELF file\n");
return std::nullopt;
}
auto segNum = reader.segments.size();
printf("Number of segments: %d\n", segNum);
printf(" # Perms Vaddr File Size Mem Size\n");
for (int i = 0; i < segNum; ++i) {
const auto seg = reader.segments[i];
const auto flags = seg->get_flags();
const u32 vaddr = static_cast<u32>(seg->get_virtual_address()); // Vaddr the segment is loaded in
u32 fileSize = static_cast<u32>(seg->get_file_size()); // Size of segment in file
u32 memorySize = static_cast<u32>(seg->get_memory_size()); // Size of segment in memory
u8* data = (u8*)seg->get_data();
// Get read/write/execute permissions for segment
const bool r = (flags & 0b100) != 0;
const bool w = (flags & 0b010) != 0;
const bool x = (flags & 0b001) != 0;
printf("[%d] (%c%c%c)\t%08X\t%08X\t%08X\n", i, r ? 'r' : '-', w ? 'w' : '-', x ? 'x' : '-', vaddr, fileSize, memorySize);
// Assert that the segment will be loaded in the executable region. If it isn't then panic.
// The executable region starts at 0x00100000 and has a maximum size of 0x03F00000
u64 endAddress = (u64)vaddr + (u64)fileSize;
const bool isGood = vaddr >= VirtualAddrs::ExecutableStart && endAddress < VirtualAddrs::ExecutableEnd;
if (!isGood) {
// We're ignoring this for now because some ELFs define a segment at the vaddr for IPC buffer mapping
// Helpers::panic("ELF is loaded at invalid place");
// return std::nullopt;
elfio reader;
if (!file.good() || !reader.load(file)) {
printf("Woops failed to load ELF\n");
return std::nullopt;
}
if (memorySize & pageMask) {
// Round up the size of the ELF segment to a page (4KB) boundary, as the OS can only alloc this way
memorySize = (memorySize + pageSize - 1) & -pageSize;
Helpers::warn("Rounding ELF segment size to %08X\n", memorySize);
// Allocate stack space. For ELFs we use the default stack size, which is 16KB
if (!allocateMainThreadStack(VirtualAddrs::DefaultStackSize)) {
// Should be unreachable
printf("Failed to allocate stack space for ELF file\n");
return std::nullopt;
}
// This should also assert that findPaddr doesn't fail
u32 fcramAddr = findPaddr(memorySize).value();
std::memcpy(&fcram[fcramAddr], data, fileSize);
auto segNum = reader.segments.size();
printf("Number of segments: %d\n", segNum);
printf(" # Perms Vaddr File Size Mem Size\n");
for (int i = 0; i < segNum; ++i) {
const auto seg = reader.segments[i];
const auto flags = seg->get_flags();
const u32 vaddr = static_cast<u32>(seg->get_virtual_address()); // Vaddr the segment is loaded in
u32 fileSize = static_cast<u32>(seg->get_file_size()); // Size of segment in file
u32 memorySize = static_cast<u32>(seg->get_memory_size()); // Size of segment in memory
u8* data = (u8*)seg->get_data();
// Allocate the segment on the OS side
allocateMemory(vaddr, fcramAddr, memorySize, true, r, w, x);
}
// Get read/write/execute permissions for segment
const bool r = (flags & 0b100) != 0;
const bool w = (flags & 0b010) != 0;
const bool x = (flags & 0b001) != 0;
// ELF can't specify a region, make it default to USA
region = Regions::USA;
return static_cast<u32>(reader.get_entry());
printf("[%d] (%c%c%c)\t%08X\t%08X\t%08X\n", i, r ? 'r' : '-', w ? 'w' : '-', x ? 'x' : '-', vaddr, fileSize, memorySize);
// Assert that the segment will be loaded in the executable region. If it isn't then panic.
// The executable region starts at 0x00100000 and has a maximum size of 0x03F00000
u64 endAddress = (u64)vaddr + (u64)fileSize;
const bool isGood = vaddr >= VirtualAddrs::ExecutableStart && endAddress < VirtualAddrs::ExecutableEnd;
if (!isGood) {
// We're ignoring this for now because some ELFs define a segment at the vaddr for IPC buffer mapping
// Helpers::panic("ELF is loaded at invalid place");
// return std::nullopt;
}
if (memorySize & pageMask) {
// Round up the size of the ELF segment to a page (4KB) boundary, as the OS can only alloc this way
memorySize = (memorySize + pageSize - 1) & -pageSize;
Helpers::warn("Rounding ELF segment size to %08X\n", memorySize);
}
// This should also assert that findPaddr doesn't fail
u32 fcramAddr = findPaddr(memorySize).value();
std::memcpy(&fcram[fcramAddr], data, fileSize);
// ELF can't specify a region, make it default to USA
region = Regions::USA;
return static_cast<u32>(reader.get_entry());*/
return std::nullopt;
}


@ -4,6 +4,9 @@
#include <optional>
#include "memory.hpp"
#include "kernel/fcram.hpp"
using namespace KernelMemoryTypes;
bool Memory::mapCXI(NCSD& ncsd, NCCH& cxi) {
printf("Text address = %08X, size = %08X\n", cxi.text.address, cxi.text.size);
@ -24,12 +27,6 @@ bool Memory::mapCXI(NCSD& ncsd, NCCH& cxi) {
// Round up the size of the CXI stack size to a page (4KB) boundary, as the OS can only allocate memory this way
u32 stackSize = (cxi.stackSize + pageSize - 1) & -pageSize;
if (stackSize > 512_KB) {
// TODO: Figure out the actual max stack size
Helpers::warn("CXI stack size is %08X which seems way too big. Clamping to 512KB", stackSize);
stackSize = 512_KB;
}
// Allocate stack
if (!allocateMainThreadStack(stackSize)) {
// Should be unreachable
@ -49,15 +46,6 @@ bool Memory::mapCXI(NCSD& ncsd, NCCH& cxi) {
return false;
}
const auto opt = findPaddr(totalSize);
if (!opt.has_value()) {
Helpers::panic("Failed to find paddr to map CXI file's code to");
return false;
}
const auto paddr = opt.value();
std::memcpy(&fcram[paddr], &code[0], totalSize); // Copy the 3 segments + BSS to FCRAM
// Map the ROM on the kernel side
u32 textOffset = 0;
u32 textAddr = cxi.text.address;
@ -71,11 +59,20 @@ bool Memory::mapCXI(NCSD& ncsd, NCCH& cxi) {
u32 dataAddr = cxi.data.address;
u32 dataSize = cxi.data.pageCount * pageSize + bssSize; // We're merging the data and BSS segments, as BSS is just pre-initted .data
allocateMemory(textAddr, paddr + textOffset, textSize, true, true, false, true); // Text is R-X
allocateMemory(rodataAddr, paddr + rodataOffset, rodataSize, true, true, false, false); // Rodata is R--
allocateMemory(dataAddr, paddr + dataOffset, dataSize, true, true, true, false); // Data+BSS is RW-
// TODO: base this off the exheader
auto region = FcramRegion::App;
ncsd.entrypoint = textAddr;
allocMemory(textAddr, cxi.text.pageCount, region, true, false, true, MemoryState::Code);
allocMemory(rodataAddr, cxi.rodata.pageCount, region, true, false, false, MemoryState::Code);
allocMemory(dataAddr, cxi.data.pageCount, region, true, true, false, MemoryState::Private);
allocMemory(dataAddr + (cxi.data.pageCount << 12), bssSize >> 12, region, true, true, false, MemoryState::Private);
// Copy .code file to FCRAM
copyToVaddr(textAddr, code.data(), textSize);
copyToVaddr(rodataAddr, code.data() + textSize, rodataSize);
copyToVaddr(dataAddr, code.data() + textSize + rodataSize, cxi.data.pageCount << 12);
ncsd.entrypoint = cxi.text.address;
// Back the IOFile for accessing the ROM, as well as the ROM's CXI partition, in the memory class.
CXIFile = ncsd.file;


@ -6,6 +6,7 @@
#include <ctime>
#include "config_mem.hpp"
#include "kernel/fcram.hpp"
#include "resource_limits.hpp"
#include "services/fonts.hpp"
#include "services/ptm.hpp"
@ -14,38 +15,33 @@ CMRC_DECLARE(ConsoleFonts);
using namespace KernelMemoryTypes;
Memory::Memory(u64& cpuTicks, const EmulatorConfig& config) : cpuTicks(cpuTicks), config(config) {
Memory::Memory(KFcram& fcramManager, u64& cpuTicks, const EmulatorConfig& config) : fcramManager(fcramManager), cpuTicks(cpuTicks), config(config) {
fcram = new uint8_t[FCRAM_SIZE]();
readTable.resize(totalPageCount, 0);
writeTable.resize(totalPageCount, 0);
memoryInfo.reserve(32); // Pre-allocate some room for memory allocation info to avoid dynamic allocs
paddrTable.resize(totalPageCount, 0);
}
void Memory::reset() {
// Unallocate all memory
// Mark the entire process address space as free
constexpr static int MAX_USER_PAGES = 0x40000000 >> 12;
memoryInfo.clear();
usedFCRAMPages.reset();
usedUserMemory = u32(0_MB);
usedSystemMemory = u32(0_MB);
memoryInfo.push_back(MemoryInfo(0, MAX_USER_PAGES, 0, KernelMemoryTypes::Free));
// TODO: remove this, only needed to make the subsequent allocations work for now
fcramManager.reset(FCRAM_SIZE, FCRAM_APPLICATION_SIZE, FCRAM_SYSTEM_SIZE, FCRAM_BASE_SIZE);
for (u32 i = 0; i < totalPageCount; i++) {
readTable[i] = 0;
writeTable[i] = 0;
paddrTable[i] = 0;
}
// Map (32 * 4) KB of FCRAM before the stack for the TLS of each thread
std::optional<u32> tlsBaseOpt = findPaddr(32 * 4_KB);
if (!tlsBaseOpt.has_value()) { // Should be unreachable but still good to have
Helpers::panic("Failed to allocate memory for thread-local storage");
}
u32 basePaddrForTLS = tlsBaseOpt.value();
for (u32 i = 0; i < appResourceLimits.maxThreads; i++) {
u32 vaddr = VirtualAddrs::TLSBase + i * VirtualAddrs::TLSSize;
allocateMemory(vaddr, basePaddrForTLS, VirtualAddrs::TLSSize, true);
basePaddrForTLS += VirtualAddrs::TLSSize;
}
// Allocate 512 bytes of TLS for each thread. Since the smallest allocatable unit is 4 KB, that means allocating one page for every 8 threads
// Note that TLS is always allocated in the Base region
s32 tlsPages = (appResourceLimits.maxThreads + 7) >> 3;
allocMemory(VirtualAddrs::TLSBase, tlsPages, FcramRegion::Base, true, true, false, MemoryState::Locked);
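// Note (illustrative): with TLSSize = 0x200 and 4 KB pages, eight threads share one page, so the
// 32-thread retail limit mentioned in kernel.hpp needs (32 + 7) >> 3 = 4 Base-region pages here.
// getTLSPointer() then returns TLSBase + threadIndex * TLSSize, e.g.
//   thread 0 -> 0x1FF82000, thread 7 -> 0x1FF82E00, thread 8 -> 0x1FF83000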
// Initialize shared memory blocks and reserve memory for them
for (auto& e : sharedMemBlocks) {
@ -56,19 +52,20 @@ void Memory::reset() {
}
e.mapped = false;
e.paddr = allocateSysMemory(e.size);
FcramBlockList memBlock;
fcramManager.alloc(memBlock, e.size >> 12, FcramRegion::Sys, false);
e.paddr = memBlock.begin()->paddr;
}
// Map DSP RAM as R/W at [0x1FF00000, 0x1FF7FFFF]
constexpr u32 dspRamPages = DSP_RAM_SIZE / pageSize; // Number of DSP RAM pages
constexpr u32 initialPage = VirtualAddrs::DSPMemStart / pageSize; // First page of DSP RAM in the virtual address space
for (u32 i = 0; i < dspRamPages; i++) {
auto pointer = uintptr_t(&dspRam[i * pageSize]);
u32 vaddr = VirtualAddrs::DSPMemStart;
u32 paddr = PhysicalAddrs::DSP_RAM;
readTable[i + initialPage] = pointer;
writeTable[i + initialPage] = pointer;
}
Operation op{ .newState = MemoryState::Static, .r = true, .w = true, .changeState = true, .changePerms = true };
changeMemoryState(vaddr, dspRamPages, op);
mapPhysicalMemory(vaddr, paddr, dspRamPages, true, true, false);
// Later adjusted based on ROM header when possible
region = Regions::USA;
@ -76,14 +73,9 @@ void Memory::reset() {
bool Memory::allocateMainThreadStack(u32 size) {
// Map stack pages as R/W
std::optional<u32> basePaddr = findPaddr(size);
if (!basePaddr.has_value()) { // Should also be unreachable but still good to have
return false;
}
// TODO: get the region from the exheader
const u32 stackBottom = VirtualAddrs::StackTop - size;
std::optional<u32> result = allocateMemory(stackBottom, basePaddr.value(), size, true); // Should never be nullopt
return result.has_value();
return allocMemory(stackBottom, size >> 12, FcramRegion::App, true, true, false, MemoryState::Locked);
}
u8 Memory::read8(u32 vaddr) {
@ -296,149 +288,235 @@ std::string Memory::readString(u32 address, u32 maxSize) {
// thanks to the New 3DS having more FCRAM
u32 Memory::getLinearHeapVaddr() { return (kernelVersion < 0x22C) ? VirtualAddrs::LinearHeapStartOld : VirtualAddrs::LinearHeapStartNew; }
std::optional<u32> Memory::allocateMemory(u32 vaddr, u32 paddr, u32 size, bool linear, bool r, bool w, bool x, bool adjustAddrs, bool isMap) {
// Kernel-allocated memory & size must always be aligned to a page boundary
// Additionally assert we don't OoM and that we don't try to allocate physical FCRAM past what's available to userland
// If we're mapping there's no fear of OoM, because we're not really allocating memory, just binding vaddrs to specific paddrs
assert(isAligned(vaddr) && isAligned(paddr) && isAligned(size));
assert(size <= FCRAM_APPLICATION_SIZE || isMap);
assert(usedUserMemory + size <= FCRAM_APPLICATION_SIZE || isMap);
assert(paddr + size <= FCRAM_APPLICATION_SIZE || isMap);
void Memory::changeMemoryState(u32 vaddr, s32 pages, const Operation& op) {
assert(!(vaddr & 0xFFF));
// Amount of available user FCRAM pages and FCRAM pages to allocate respectively
const u32 availablePageCount = (FCRAM_APPLICATION_SIZE - usedUserMemory) / pageSize;
const u32 neededPageCount = size / pageSize;
if (!op.changePerms && !op.changeState) Helpers::panic("Invalid op passed to changeMemoryState!");
assert(availablePageCount >= neededPageCount || isMap);
bool blockFound = false;
// If the paddr is 0, that means we need to select our own
// TODO: Fix. This method always tries to allocate blocks linearly.
// However, if the allocation is non-linear, the panic will trigger when it shouldn't.
// Non-linear allocation needs special handling
if (paddr == 0 && adjustAddrs) {
std::optional<u32> newPaddr = findPaddr(size);
if (!newPaddr.has_value()) {
Helpers::panic("Failed to find paddr");
for (auto it = memoryInfo.begin(); it != memoryInfo.end(); it++) {
// Find the block that the memory region is located in
u32 blockStart = it->baseAddr;
u32 blockEnd = it->end();
u32 reqStart = vaddr;
u32 reqEnd = vaddr + (pages << 12);
if (!(reqStart >= blockStart && reqEnd <= blockEnd)) continue;
// Now that the block has been found, fill it with the necessary info
auto oldState = it->state;
u32 oldPerms = it->perms;
it->baseAddr = reqStart;
it->pages = pages;
if (op.changePerms) it->perms = (op.r ? PERMISSION_R : 0) | (op.w ? PERMISSION_W : 0) | (op.x ? PERMISSION_X : 0);
if (op.changeState) it->state = op.newState;
// If the requested memory region is smaller than the block found, the block must be split
if (blockStart < reqStart) {
MemoryInfo startBlock(blockStart, (reqStart - blockStart) >> 12, oldPerms, oldState);
memoryInfo.insert(it, startBlock);
}
paddr = newPaddr.value();
assert(paddr + size <= FCRAM_APPLICATION_SIZE || isMap);
}
// If the vaddr is 0 that means we need to select our own
// Depending on whether our mapping should be linear or not we allocate from one of the 2 typical heap spaces
// We don't plan on implementing freeing any time soon, so we can pick added userUserMemory to the vaddr base to
// Get the full vaddr.
// TODO: Fix this
if (vaddr == 0 && adjustAddrs) {
// Linear memory needs to be allocated in a way where you can easily get the paddr by subtracting the linear heap base
// In order to be able to easily send data to hardware like the GPU
if (linear) {
vaddr = getLinearHeapVaddr() + paddr;
} else {
vaddr = usedUserMemory + VirtualAddrs::NormalHeapStart;
}
}
if (!isMap) {
usedUserMemory += size;
}
// Do linear mapping
u32 virtualPage = vaddr >> pageShift;
u32 physPage = paddr >> pageShift; // TODO: Special handle when non-linear mapping is necessary
for (u32 i = 0; i < neededPageCount; i++) {
if (r) {
readTable[virtualPage] = uintptr_t(&fcram[physPage * pageSize]);
}
if (w) {
writeTable[virtualPage] = uintptr_t(&fcram[physPage * pageSize]);
if (reqEnd < blockEnd) {
auto itAfter = std::next(it);
MemoryInfo endBlock(reqEnd, (blockEnd - reqEnd) >> 12, oldPerms, oldState);
memoryInfo.insert(itAfter, endBlock);
}
// Mark FCRAM page as allocated and go on
usedFCRAMPages[physPage] = true;
virtualPage++;
physPage++;
blockFound = true;
break;
}
// Back up the info for this allocation in our memoryInfo vector
u32 perms = (r ? PERMISSION_R : 0) | (w ? PERMISSION_W : 0) | (x ? PERMISSION_X : 0);
memoryInfo.push_back(std::move(MemoryInfo(vaddr, size, perms, KernelMemoryTypes::Reserved)));
if (!blockFound) Helpers::panic("Unable to find block in changeMemoryState!");
return vaddr;
// Merge all blocks with the same state and permissions
for (auto it = memoryInfo.begin(); it != memoryInfo.end();) {
auto next = std::next(it);
if (next == memoryInfo.end()) break;
if (it->state != next->state || it->perms != next->perms) {
it++;
continue;
}
next->baseAddr = it->baseAddr;
next->pages += it->pages;
it = memoryInfo.erase(it);
}
}
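// Illustrative trace (hand-derived): starting from a single free block covering [0x08000000, 0x08040000)
// (0x40 pages), changing the state of 0x10 pages at 0x08010000 to Private splits memoryInfo into:
//   { 0x08000000, 0x10 pages, Free }, { 0x08010000, 0x10 pages, Private }, { 0x08020000, 0x20 pages, Free }
// If the middle range later returns to Free (with matching permissions), the merge loop above collapses
// the three entries back into one.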
// Find a paddr which we can use for allocating "size" bytes
std::optional<u32> Memory::findPaddr(u32 size) {
assert(isAligned(size));
const u32 neededPages = size / pageSize;
void Memory::queryPhysicalBlocks(FcramBlockList& outList, u32 vaddr, s32 pages) {
s32 srcPages = pages;
for (auto& alloc : memoryInfo) {
u32 blockStart = alloc.baseAddr;
u32 blockEnd = alloc.end();
// The FCRAM page we're testing to see if it's appropriate to use
u32 candidatePage = 0;
// The number of linear available pages we could find starting from this candidate page.
// If this ends up >= than neededPages then the paddr is good (ie we can use the candidate page as a base address)
u32 counter = 0;
if (!(vaddr >= blockStart && vaddr < blockEnd)) continue;
for (u32 i = 0; i < FCRAM_APPLICATION_PAGE_COUNT; i++) {
if (usedFCRAMPages[i]) { // Page is occupied already, go to new candidate
candidatePage = i + 1;
counter = 0;
} else { // The paddr we're testing has 1 more free page
counter++;
// Check if there's enough free memory to use this page
// We use == instead of >= because some software does 0-byte allocations
if (counter >= neededPages) {
return candidatePage * pageSize;
}
}
s32 blockPaddr = paddrTable[vaddr >> 12];
s32 blockPages = alloc.pages - ((vaddr - blockStart) >> 12);
blockPages = std::min(srcPages, blockPages);
FcramBlock physicalBlock(blockPaddr, blockPages);
outList.push_back(physicalBlock);
vaddr += blockPages << 12;
srcPages -= blockPages;
if (srcPages == 0) break;
}
// Couldn't find any page :(
return std::nullopt;
if (srcPages != 0) Helpers::panic("Unable to find virtual pages to map!");
}
u32 Memory::allocateSysMemory(u32 size) {
// Should never be triggered, only here as a sanity check
if (!isAligned(size)) {
Helpers::panic("Memory::allocateSysMemory: Size is not page aligned (val = %08X)", size);
void Memory::mapPhysicalMemory(u32 vaddr, u32 paddr, s32 pages, bool r, bool w, bool x) {
assert(!(vaddr & 0xFFF));
assert(!(paddr & 0xFFF));
// TODO: make this a separate function
u8* hostPtr = nullptr;
if (paddr < FCRAM_SIZE) {
hostPtr = fcram + paddr; // FIXME
}
else if (paddr >= VirtualAddrs::DSPMemStart && paddr < VirtualAddrs::DSPMemStart + DSP_RAM_SIZE) {
hostPtr = dspRam + (paddr - VirtualAddrs::DSPMemStart);
}
// We use a pretty dumb allocator for OS memory since this is not really accessible to the app and is only used internally
// It works by just allocating memory linearly, starting from index 0 of OS memory and going up
// This should also be unreachable in practice and exists as a sanity check
if (size > remainingSysFCRAM()) {
Helpers::panic("Memory::allocateSysMemory: Overflowed OS FCRAM");
for (int i = 0; i < pages; i++) {
u32 index = (vaddr >> 12) + i;
paddrTable[index] = paddr + (i << 12);
if (r) readTable[index] = (uintptr_t)(hostPtr + (i << 12));
else readTable[index] = 0;
if (w) writeTable[index] = (uintptr_t)(hostPtr + (i << 12));
else writeTable[index] = 0;
}
const u32 pageCount = size / pageSize; // Number of pages that will be used up
const u32 startIndex = sysFCRAMIndex() + usedSystemMemory; // Starting FCRAM index
const u32 startingPage = startIndex / pageSize;
for (u32 i = 0; i < pageCount; i++) {
if (usedFCRAMPages[startingPage + i]) // Also a theoretically unreachable panic for safety
Helpers::panic("Memory::reserveMemory: Trying to reserve already reserved memory");
usedFCRAMPages[startingPage + i] = true;
}
usedSystemMemory += size;
return startIndex;
}
// The way I understand how the kernel's QueryMemory is supposed to work is that you give it a vaddr
// And the kernel looks up the memory allocations it's performed, finds which one it belongs in and returns its info?
// TODO: Verify this
MemoryInfo Memory::queryMemory(u32 vaddr) {
void Memory::unmapPhysicalMemory(u32 vaddr, u32 paddr, s32 pages) {
for (int i = 0; i < pages; i++) {
u32 index = (vaddr >> 12) + i;
paddrTable[index] = 0;
readTable[index] = 0;
writeTable[index] = 0;
}
}
bool Memory::allocMemory(u32 vaddr, s32 pages, FcramRegion region, bool r, bool w, bool x, MemoryState state) {
auto res = testMemoryState(vaddr, pages, MemoryState::Free);
if (res.isFailure()) return false;
FcramBlockList memList;
fcramManager.alloc(memList, pages, region, false);
for (auto it = memList.begin(); it != memList.end(); it++) {
Operation op{ .newState = state, .r = r, .w = w, .x = x, .changeState = true, .changePerms = true };
changeMemoryState(vaddr, it->pages, op);
mapPhysicalMemory(vaddr, it->paddr, it->pages, r, w, x);
vaddr += it->pages << 12;
}
return true;
}
bool Memory::allocMemoryLinear(u32& outVaddr, u32 inVaddr, s32 pages, FcramRegion region, bool r, bool w, bool x) {
if (inVaddr) Helpers::panic("inVaddr specified for linear allocation!");
FcramBlockList memList;
fcramManager.alloc(memList, pages, region, true);
u32 paddr = memList.begin()->paddr;
u32 vaddr = getLinearHeapVaddr() + paddr;
auto res = testMemoryState(vaddr, pages, MemoryState::Free);
if (res.isFailure()) Helpers::panic("Unable to map linear allocation (vaddr:%08X pages:%08X)", vaddr, pages);
Operation op{ .newState = MemoryState::Continuous, .r = r, .w = w, .x = x, .changeState = true, .changePerms = true };
changeMemoryState(vaddr, pages, op);
mapPhysicalMemory(vaddr, paddr, pages, r, w, x);
outVaddr = vaddr;
return true;
}
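// Note (illustrative): linear allocations are mapped at getLinearHeapVaddr() + paddr, so code that needs
// to hand a physical address to hardware (e.g. the GPU) can recover it with a plain subtraction:
//   u32 paddr = linearVaddr - getLinearHeapVaddr();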
bool Memory::mapVirtualMemory(u32 dstVaddr, u32 srcVaddr, s32 pages, bool r, bool w, bool x, MemoryState oldDstState, MemoryState oldSrcState,
MemoryState newDstState, MemoryState newSrcState) {
// Check that the regions have the specified state
// TODO: check src perms
auto res = testMemoryState(srcVaddr, pages, oldSrcState);
if (res.isFailure()) return false;
res = testMemoryState(dstVaddr, pages, oldDstState);
if (res.isFailure()) return false;
// Change the virtual memory state for both regions
Operation srcOp{ .newState = newSrcState, .changeState = true };
changeMemoryState(srcVaddr, pages, srcOp);
Operation dstOp{ .newState = newDstState, .r = r, .w = w, .x = x, .changeState = true, .changePerms = true };
changeMemoryState(dstVaddr, pages, dstOp);
// Get a list of physical blocks in the source region
FcramBlockList physicalList;
queryPhysicalBlocks(physicalList, srcVaddr, pages);
// Map or unmap each physical block
for (auto& block : physicalList) {
if (newDstState == MemoryState::Free) unmapPhysicalMemory(dstVaddr, block.paddr, block.pages);
else mapPhysicalMemory(dstVaddr, block.paddr, block.pages, r, w, x);
dstVaddr += block.pages << 12;
}
return true;
}
void Memory::changePermissions(u32 vaddr, s32 pages, bool r, bool w, bool x) {
Operation op{ .r = r, .w = w, .x = x, .changePerms = true };
changeMemoryState(vaddr, pages, op);
// Now that permissions have been changed, update the corresponding host tables
FcramBlockList physicalList;
queryPhysicalBlocks(physicalList, vaddr, pages);
for (auto& block : physicalList) {
mapPhysicalMemory(vaddr, block.paddr, block.pages, r, w, x);
vaddr += block.pages << 12;
}
}
Result::HorizonResult Memory::queryMemory(MemoryInfo& out, u32 vaddr) {
// Check each allocation
for (auto& alloc : memoryInfo) {
// Check if the memory address belongs in this allocation and return the info if so
if (vaddr >= alloc.baseAddr && vaddr < alloc.end()) {
return alloc;
out = alloc;
return Result::Success;
}
}
// Otherwise, if this vaddr was never allocated
// TODO: I think this is meant to return how much memory starting here is free as the size?
return MemoryInfo(vaddr, pageSize, 0, KernelMemoryTypes::Free);
// Official kernel just returns an error here
Helpers::panic("Failed to find block in QueryMemory!");
return Result::FailurePlaceholder;
}
Result::HorizonResult Memory::testMemoryState(u32 vaddr, s32 pages, MemoryState desiredState) {
for (auto& alloc : memoryInfo) {
// Don't bother checking if we're to the left of the requested region
if (vaddr >= alloc.end()) continue;
if (alloc.state != desiredState) return Result::FailurePlaceholder; // TODO: error for state mismatch
// If the end of this block comes after the end of the requested range with no errors, it's a success
if (alloc.end() >= vaddr + (pages << 12)) return Result::Success;
}
// TODO: error for when address is outside of userland
return Result::FailurePlaceholder;
}
void Memory::copyToVaddr(u32 dstVaddr, const u8* srcHost, s32 size) {
// TODO: check for noncontiguous allocations
u8* dstHost = (u8*)readTable[dstVaddr >> 12] + (dstVaddr & 0xFFF);
memcpy(dstHost, srcHost, size);
}
u8* Memory::mapSharedMemory(Handle handle, u32 vaddr, u32 myPerms, u32 otherPerms) {
@ -459,13 +537,11 @@ u8* Memory::mapSharedMemory(Handle handle, u32 vaddr, u32 myPerms, u32 otherPerm
bool w = myPerms & 0b010;
bool x = myPerms & 0b100;
const auto result = allocateMemory(vaddr, paddr, size, true, r, w, x, false, true);
e.mapped = true;
if (!result.has_value()) {
Helpers::panic("Memory::mapSharedMemory: Failed to map shared memory block");
return nullptr;
}
Operation op{ .newState = MemoryState::Shared, .r = r, .w = w, .x = x, .changeState = true, .changePerms = true };
changeMemoryState(vaddr, size >> 12, op);
mapPhysicalMemory(vaddr, paddr, size >> 12, r, w, x);
e.mapped = true;
return &fcram[paddr];
}
}
@ -475,24 +551,6 @@ u8* Memory::mapSharedMemory(Handle handle, u32 vaddr, u32 myPerms, u32 otherPerm
return nullptr;
}
void Memory::mirrorMapping(u32 destAddress, u32 sourceAddress, u32 size) {
// Should theoretically be unreachable, only here for safety purposes
assert(isAligned(destAddress) && isAligned(sourceAddress) && isAligned(size));
const u32 pageCount = size / pageSize; // How many pages we need to mirror
for (u32 i = 0; i < pageCount; i++) {
// Redo the shift here to "properly" handle wrapping around the address space instead of reading OoB
const u32 sourcePage = sourceAddress / pageSize;
const u32 destPage = destAddress / pageSize;
readTable[destPage] = readTable[sourcePage];
writeTable[destPage] = writeTable[sourcePage];
sourceAddress += pageSize;
destAddress += pageSize;
}
}
// Get the number of ms since Jan 1 1900
u64 Memory::timeSince3DSEpoch() {
using namespace std::chrono;


@ -23,7 +23,8 @@ namespace CROHeader {
NameOffset = 0x084,
NextCRO = 0x088,
PrevCRO = 0x08C,
FixedSize = 0x98,
FileSize = 0x090,
FixedSize = 0x098,
OnUnresolved = 0x0AC,
CodeOffset = 0x0B0,
DataOffset = 0x0B8,
@ -146,6 +147,8 @@ static const std::string CRO_MAGIC("CRO0");
static const std::string CRO_MAGIC_FIXED("FIXD");
static const std::string CRR_MAGIC("CRR0");
using namespace KernelMemoryTypes;
class CRO {
Memory &mem;
@ -164,25 +167,17 @@ class CRO {
return mem.readString(moduleName.offset, moduleName.size);
}
u32 getNextCRO() {
return mem.read32(croPointer + CROHeader::NextCRO);
}
u32 getPrevCRO() {
return mem.read32(croPointer + CROHeader::PrevCRO);
}
u32 getNextCRO() { return mem.read32(croPointer + CROHeader::NextCRO); }
u32 getFixedSize() {
return mem.read32(croPointer + CROHeader::FixedSize);
}
u32 getPrevCRO() { return mem.read32(croPointer + CROHeader::PrevCRO); }
void setNextCRO(u32 nextCRO) {
mem.write32(croPointer + CROHeader::NextCRO, nextCRO);
}
u32 getFixedSize() { return mem.read32(croPointer + CROHeader::FixedSize); }
void setPrevCRO(u32 prevCRO) {
mem.write32(croPointer + CROHeader::PrevCRO, prevCRO);
}
void setNextCRO(u32 nextCRO) { mem.write32(croPointer + CROHeader::NextCRO, nextCRO); }
void setPrevCRO(u32 prevCRO) { mem.write32(croPointer + CROHeader::PrevCRO, prevCRO); }
u32 getSize() { return mem.read32(croPointer + CROHeader::FileSize); }
void write32(u32 addr, u32 value) {
// Note: some games export symbols to the static module, which doesn't contain any segments.
@ -190,11 +185,11 @@ class CRO {
// can't be accessed via mem.write32()
auto writePointer = mem.getWritePointer(addr);
if (writePointer) {
*(u32*)writePointer = value;
*(u32 *)writePointer = value;
} else {
auto readPointer = mem.getReadPointer(addr);
if (readPointer) {
*(u32*)readPointer = value;
*(u32 *)readPointer = value;
} else {
Helpers::panic("LDR_RO write to invalid address = %X\n", addr);
}
@ -228,11 +223,9 @@ class CRO {
return entryOffset + offset;
}
u32 getOnUnresolvedAddr() {
return getSegmentAddr(mem.read32(croPointer + CROHeader::OnUnresolved));
}
u32 getOnUnresolvedAddr() { return getSegmentAddr(mem.read32(croPointer + CROHeader::OnUnresolved)); }
u32 getNamedExportSymbolAddr(const std::string& symbolName) {
u32 getNamedExportSymbolAddr(const std::string &symbolName) {
// Note: The CRO contains a trie for fast symbol lookup. For simplicity,
// we won't use it and instead look up the symbol in the named export symbol table
@ -446,13 +439,16 @@ class CRO {
const u32 segmentID = mem.read32(segmentTable.offset + 12 * segment + SegmentTable::ID);
switch (segmentID) {
case SegmentTable::SegmentID::DATA:
*oldDataVaddr = segmentOffset + croPointer; oldDataSegmentOffset = segmentOffset; segmentOffset = dataVaddr; break;
*oldDataVaddr = segmentOffset + croPointer;
oldDataSegmentOffset = segmentOffset;
segmentOffset = dataVaddr;
break;
case SegmentTable::SegmentID::BSS: segmentOffset = bssVaddr; break;
case SegmentTable::SegmentID::TEXT:
case SegmentTable::SegmentID::RODATA:
if (segmentOffset != 0) segmentOffset += croPointer; break;
default:
Helpers::panic("Unknown segment ID = %u", segmentID);
if (segmentOffset != 0) segmentOffset += croPointer;
break;
default: Helpers::panic("Unknown segment ID = %u", segmentID);
}
mem.write32(segmentTable.offset + 12 * segment + SegmentTable::Offset, segmentOffset);
@ -473,9 +469,9 @@ class CRO {
case SegmentTable::SegmentID::BSS: segmentOffset = 0; break;
case SegmentTable::SegmentID::TEXT:
case SegmentTable::SegmentID::RODATA:
if (segmentOffset != 0) segmentOffset -= croPointer; break;
default:
Helpers::panic("Unknown segment ID = %u", segmentID);
if (segmentOffset != 0) segmentOffset -= croPointer;
break;
default: Helpers::panic("Unknown segment ID = %u", segmentID);
}
mem.write32(segmentTable.offset + 12 * segment + SegmentTable::Offset, segmentOffset);
@ -639,7 +635,9 @@ class CRO {
u32 relocationOffset = mem.read32(anonymousImportTable.offset + 8 * anonymousImport + AnonymousImportTable::RelocationOffset);
if (relocationOffset != 0) {
mem.write32(anonymousImportTable.offset + 8 * anonymousImport + AnonymousImportTable::RelocationOffset, relocationOffset + croPointer);
mem.write32(
anonymousImportTable.offset + 8 * anonymousImport + AnonymousImportTable::RelocationOffset, relocationOffset + croPointer
);
}
}
@ -653,7 +651,9 @@ class CRO {
u32 relocationOffset = mem.read32(anonymousImportTable.offset + 8 * anonymousImport + AnonymousImportTable::RelocationOffset);
if (relocationOffset != 0) {
mem.write32(anonymousImportTable.offset + 8 * anonymousImport + AnonymousImportTable::RelocationOffset, relocationOffset - croPointer);
mem.write32(
anonymousImportTable.offset + 8 * anonymousImport + AnonymousImportTable::RelocationOffset, relocationOffset - croPointer
);
}
}
@ -661,7 +661,7 @@ class CRO {
}
bool relocateInternalSymbols(u32 oldDataVaddr) {
const u8* header = (u8*)mem.getReadPointer(croPointer);
const u8 *header = (u8 *)mem.getReadPointer(croPointer);
const CROHeaderEntry relocationPatchTable = getHeaderEntry(CROHeader::RelocationPatchTableOffset);
const CROHeaderEntry segmentTable = getHeaderEntry(CROHeader::SegmentTableOffset);
@ -1198,9 +1198,7 @@ class CRO {
}
};
void LDRService::reset() {
loadedCRS = 0;
}
void LDRService::reset() { loadedCRS = 0; }
void LDRService::handleSyncRequest(u32 messagePointer) {
const u32 command = mem.read32(messagePointer);
@ -1245,7 +1243,13 @@ void LDRService::initialize(u32 messagePointer) {
}
// Map CRO to output address
mem.mirrorMapping(mapVaddr, crsPointer, size);
// TODO: how to handle permissions?
bool succeeded = mem.mapVirtualMemory(
mapVaddr, crsPointer, size >> 12, true, true, true, MemoryState::Free, MemoryState::Private, MemoryState::Locked, MemoryState::AliasCode
);
if (!succeeded) {
Helpers::panic("Failed to map CRS");
}
CRO crs(mem, mapVaddr, false);
@ -1312,7 +1316,9 @@ void LDRService::loadCRO(u32 messagePointer, bool isNew) {
const u32 fixLevel = mem.read32(messagePointer + 40);
const Handle process = mem.read32(messagePointer + 52);
log("LDR_RO::LoadCRO (isNew = %d, buffer = %08X, vaddr = %08X, size = %08X, .data vaddr = %08X, .data size = %08X, .bss vaddr = %08X, .bss size = %08X, auto link = %d, fix level = %X, process = %X)\n", isNew, croPointer, mapVaddr, size, dataVaddr, dataSize, bssVaddr, bssSize, autoLink, fixLevel, process);
log("LDR_RO::LoadCRO (isNew = %d, buffer = %08X, vaddr = %08X, size = %08X, .data vaddr = %08X, .data size = %08X, .bss vaddr = %08X, .bss size "
"= %08X, auto link = %d, fix level = %X, process = %X)\n",
isNew, croPointer, mapVaddr, size, dataVaddr, dataSize, bssVaddr, bssSize, autoLink, fixLevel, process);
// Sanity checks
if (size < CRO_HEADER_SIZE) {
@ -1332,7 +1338,13 @@ void LDRService::loadCRO(u32 messagePointer, bool isNew) {
}
// Map CRO to output address
mem.mirrorMapping(mapVaddr, croPointer, size);
// TODO: how to handle permissions?
bool succeeded = mem.mapVirtualMemory(
mapVaddr, croPointer, size >> 12, true, true, true, MemoryState::Free, MemoryState::Private, MemoryState::Locked, MemoryState::AliasCode
);
if (!succeeded) {
Helpers::panic("Failed to map CRO");
}
CRO cro(mem, mapVaddr, true);
@ -1392,7 +1404,17 @@ void LDRService::unloadCRO(u32 messagePointer) {
Helpers::panic("Failed to unrebase CRO");
}
u32 size = cro.getSize();
bool succeeded = mem.mapVirtualMemory(
mapVaddr, croPointer, size >> 12, false, false, false, MemoryState::Locked, MemoryState::AliasCode, MemoryState::Free, MemoryState::Private
);
if (!succeeded) {
Helpers::panic("Failed to unmap CRO");
}
kernel.clearInstructionCacheRange(mapVaddr, cro.getFixedSize());
mem.write32(messagePointer, IPC::responseHeader(0x5, 1, 0));
mem.write32(messagePointer + 4, Result::Success);
}


@ -20,8 +20,8 @@ __declspec(dllexport) DWORD AmdPowerXpressRequestHighPerformance = 1;
Emulator::Emulator()
: config(getConfigPath()), kernel(cpu, memory, gpu, config, lua), cpu(memory, kernel, *this), gpu(memory, config),
memory(cpu.getTicksRef(), config), cheats(memory, kernel.getServiceManager().getHID()), audioDevice(config.audioDeviceConfig), lua(*this),
running(false)
memory(kernel.getFcramManager(), cpu.getTicksRef(), config), cheats(memory, kernel.getServiceManager().getHID()),
audioDevice(config.audioDeviceConfig), lua(*this), running(false)
#ifdef PANDA3DS_ENABLE_HTTP_SERVER
,
httpServer(this)
@ -159,20 +159,21 @@ void Emulator::pollScheduler() {
scheduler.updateNextTimestamp();
switch (eventType) {
case Scheduler::EventType::VBlank: [[likely]] {
// Signal that we've reached the end of a frame
frameDone = true;
lua.signalEvent(LuaEvent::Frame);
case Scheduler::EventType::VBlank:
[[likely]] {
// Signal that we've reached the end of a frame
frameDone = true;
lua.signalEvent(LuaEvent::Frame);
// Send VBlank interrupts
ServiceManager& srv = kernel.getServiceManager();
srv.sendGPUInterrupt(GPUInterrupt::VBlank0);
srv.sendGPUInterrupt(GPUInterrupt::VBlank1);
// Send VBlank interrupts
ServiceManager& srv = kernel.getServiceManager();
srv.sendGPUInterrupt(GPUInterrupt::VBlank0);
srv.sendGPUInterrupt(GPUInterrupt::VBlank1);
// Queue next VBlank event
scheduler.addEvent(Scheduler::EventType::VBlank, time + CPU::ticksPerSec / 60);
break;
}
// Queue next VBlank event
scheduler.addEvent(Scheduler::EventType::VBlank, time + CPU::ticksPerSec / 60);
break;
}
case Scheduler::EventType::UpdateTimers: kernel.pollTimers(); break;
case Scheduler::EventType::RunDSP: {
@ -352,8 +353,7 @@ bool Emulator::loadELF(std::ifstream& file) {
std::span<u8> Emulator::getSMDH() {
switch (romType) {
case ROMType::NCSD:
case ROMType::CXI:
return memory.getCXI()->smdh;
case ROMType::CXI: return memory.getCXI()->smdh;
default: {
return std::span<u8>();
}
@ -385,7 +385,7 @@ static void dumpRomFSNode(const RomFS::RomFSNode& node, const char* romFSBase, c
for (auto& directory : node.directories) {
const auto newPath = path / directory->name;
// Create the directory for the new folder
std::error_code ec;
std::filesystem::create_directories(newPath, ec);
@ -464,7 +464,7 @@ void Emulator::reloadSettings() {
loadRenderdoc();
}
gpu.getRenderer()->setHashTextures(config.hashTextures);
gpu.getRenderer()->setHashTextures(config.hashTextures);
#ifdef PANDA3DS_ENABLE_DISCORD_RPC
// Reload RPC setting if we're compiling with RPC support