diff --git a/.gitignore b/.gitignore index 54b77b4ef..c8c423006 100644 --- a/.gitignore +++ b/.gitignore @@ -38,6 +38,9 @@ *.x86_64 *.hex +# Deko3d shaders +*.dksh + # Switch Executables *.nso *.nro diff --git a/Makefile b/Makefile index 4ef600202..759ee1d77 100644 --- a/Makefile +++ b/Makefile @@ -65,6 +65,7 @@ dist-no-debug: all mkdir -p atmosphere-$(AMSVER)/atmosphere/contents/0100000000000037 mkdir -p atmosphere-$(AMSVER)/atmosphere/contents/010000000000003C mkdir -p atmosphere-$(AMSVER)/atmosphere/contents/0100000000000015 + mkdir -p atmosphere-$(AMSVER)/atmosphere/contents/0100000000000042 mkdir -p atmosphere-$(AMSVER)/atmosphere/fatal_errors mkdir -p atmosphere-$(AMSVER)/atmosphere/config_templates mkdir -p atmosphere-$(AMSVER)/atmosphere/config @@ -93,11 +94,13 @@ dist-no-debug: all cp stratosphere/ro/ro.nsp atmosphere-$(AMSVER)/atmosphere/contents/0100000000000037/exefs.nsp cp stratosphere/jpegdec/jpegdec.nsp atmosphere-$(AMSVER)/atmosphere/contents/010000000000003C/exefs.nsp cp stratosphere/lm/lm.nsp atmosphere-$(AMSVER)/atmosphere/contents/0100000000000015/exefs.nsp + cp stratosphere/pgl/pgl.nsp atmosphere-$(AMSVER)/atmosphere/contents/0100000000000042/exefs.nsp mkdir -p atmosphere-$(AMSVER)/atmosphere/contents/0100000000000032/flags touch atmosphere-$(AMSVER)/atmosphere/contents/0100000000000032/flags/boot2.flag mkdir -p atmosphere-$(AMSVER)/atmosphere/contents/0100000000000037/flags touch atmosphere-$(AMSVER)/atmosphere/contents/0100000000000037/flags/boot2.flag cp troposphere/reboot_to_payload/reboot_to_payload.nro atmosphere-$(AMSVER)/switch/reboot_to_payload.nro + cp troposphere/daybreak/daybreak.nro atmosphere-$(AMSVER)/switch/daybreak.nro cd atmosphere-$(AMSVER); zip -r ../atmosphere-$(AMSVER).zip ./*; cd ../; rm -r atmosphere-$(AMSVER) mkdir out @@ -144,6 +147,8 @@ dist: dist-no-debug cp stratosphere/erpt/erpt.elf atmosphere-$(AMSVER)-debug/erpt.elf cp stratosphere/jpegdec/jpegdec.elf atmosphere-$(AMSVER)-debug/jpegdec.elf cp stratosphere/lm/lm.elf atmosphere-$(AMSVER)-debug/lm.elf + cp stratosphere/pgl/pgl.elf atmosphere-$(AMSVER)-debug/pgl.elf + cp troposphere/daybreak/daybreak.elf atmosphere-$(AMSVER)-debug/daybreak.elf cd atmosphere-$(AMSVER)-debug; zip -r ../atmosphere-$(AMSVER)-debug.zip ./*; cd ../; rm -r atmosphere-$(AMSVER)-debug mv atmosphere-$(AMSVER)-debug.zip out/atmosphere-$(AMSVER)-debug.zip diff --git a/docs/changelog.md b/docs/changelog.md index da0287668..8f436a89f 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,4 +1,31 @@ # Changelog +## 0.14.1 ++ An issue was fixed in 0.14.0 that would cause a black screen on boot when the INI1's size was not aligned to 8 bytes. ++ General system stability improvements to enhance the user's experience. +## 0.14.0 ++ An API (`ams:su`) was added to allow homebrew to safely install system upgrades or downgrades. + + This is a re-implementation of the logic that `ns` uses to install gamecard system updates. + + Nintendo (and now atmosphère) uses an installation process that can recover no matter where a failure occurs, which should significantly improve the safety of custom system update installation. ++ Support was added to `exosphère` for running on Mariko hardware. + + **Please note**: Atmosphère still does not support Mariko, and should not be run on Mariko yet. + + Certain stratosphere components do not handle mariko-specific logic fully correctly yet, and may initialize or interact with hardware incorrectly. + + This will be fixed and support will be added over the remainder of the Summer. 
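Throughout this patch, per-SoC behavior is selected by reading the fused SoC type and branching on it, as in the `GetSocType() == fuse::SocType_Mariko` checks in the `secmon_boot_setup.cpp` hunks below. A minimal self-contained sketch of that dispatch pattern is shown here for orientation only — the `SocType` enum and `select_warmboot_scratch` helper are illustrative stand-ins, not exosphère's real `fuse`/`reg` API:

```cpp
// Illustrative sketch only: models how the patch picks per-SoC register targets
// by branching on the fused SoC type (see the GetSocType() checks in the hunks below).
// SocType and select_warmboot_scratch are stand-ins, not exosphere's real API.
#include <cstdio>

enum class SocType { Erista, Mariko };

// The real code writes the warmboot firmware address to APBDEV_PMC_SCRATCH1 on
// Erista and to APBDEV_PMC_SECURE_SCRATCH119 on Mariko; here we just return the name.
const char *select_warmboot_scratch(SocType soc) {
    if (soc == SocType::Mariko) {
        return "APBDEV_PMC_SECURE_SCRATCH119";
    } else /* SocType::Erista */ {
        return "APBDEV_PMC_SCRATCH1";
    }
}

int main() {
    std::printf("Erista -> %s\n", select_warmboot_scratch(SocType::Erista));
    std::printf("Mariko -> %s\n", select_warmboot_scratch(SocType::Mariko));
    return 0;
}
```

The same branch-on-SoC pattern gates the Mariko-only key derivation (`DeriveMasterKekAndDeviceKeyMariko`) and the Mariko-specific security-engine sticky-bit expectations further down in this diff.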
++ A homebrew application (`daybreak`) was added that uses the system updater API (with thanks to @Adubbz for both design and implementation). + + `daybreak` is included with atmosphère, and functions as a safer/more accurate equivalent to e.g. ChoiDujourNX. + + Upgrades/downgrades can be installed from a folder containing the update NCAs on the SD card. + + Because the update logic functions identically to Nintendo's, `daybreak` will be safe to use on Mariko when the rest of atmosphère has support. + + **Please note**: Daybreak requires that meta (.cnmt) NCAs have the correct extension `.cnmt.nca`. + + This is because gamecard system update logic uses extension to determine whether to mount the content. + + [Several](https://gist.github.com/HookedBehemoth/df36b5970e1c5b1b512ec7bdd9043c6e) [scripts](https://gist.github.com/antiKk/279966c27fdfd9c7fe63b4ae410f89c4) have been made by community members to automatically rename folders with incorrect extensions. ++ A bug was fixed that would cause file-based emummc to throw an error (showing a hexdump) on boot. + + Major thanks to @hexkyz for tracking down and resolving this. ++ A number of minor issues were resolved, including: + + fusee now prints information to the screen when an error occurs, instead of getting stuck trying to initialize the display. + + A race condition in Horizon was worked around that could prevent boot under certain circumstances. + + A bug was fixed that would cause atmosphère modules to open ten copies of certain filesystems instead of one. + + This could cause object exhaustion under certain circumstances. ++ For those interested in atmosphère's future development plans, the project's [roadmap](https://github.com/Atmosphere-NX/Atmosphere/blob/ac9832c5ce7be5832f6d29f6564a9c03e7efd22f/docs/roadmap.md) was updated. ++ General system stability improvements to enhance the user's experience. ## 0.13.0 + `exosphère`, atmosphère's secure monitor re-implementation, was completely re-written. + `exosphère` was the first component authored for the project in early 2018. It is written in C, and in a style very different from the rest of atmosphère's code. diff --git a/docs/roadmap.md b/docs/roadmap.md index 94b6cba6c..6c6d4e274 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -1,15 +1,10 @@ # Planned Features atmosphère has a number of features that are either works-in-progress or planned. Please note that while time-estimates are given, they are loose, and things may be completed sooner or later than advertised. -The following descriptions were last updated on June 15th, 2020. - -## system updater api -* **Description**: A planned extension api for stratosphere (tenatively `ams:su`), this will provide an interface for homebrew to safely install system upgrades or downgrades. This will allow for much more easily transitioning safely between different versions of the operating system. -* **Development Status**: Backend/implementation completed; final stages (user-facing ipc api) to be written by SciresM. -* **Estimated Time**: June 2020 +The following descriptions were last updated on July 7th, 2020. ## ams-on-mariko -* **Description**: Atmosphere cannot run as-is on Mariko hardware. A large number of changes are needed in many components. Although exosphere's rewrite laid most groundwork on the secure monitor side, there is still work to do there -- and additional work is needed on the bootloader and stratosphere sides as well. 
Mariko support will also require further design thought; atmosphere's debugging design heavily relies on reboot-to-payload and (more generally) the ability to perform warmboot bootrom hax at will. This is not possible on Mariko, and will require a new design/software support for whatever solution is chosen. +* **Description**: Atmosphere cannot run as-is on Mariko hardware. A large number of changes are needed in many components. Although secure monitor support is complete in exosphere, additional work is needed on the bootloader and stratosphere sides as well. Mariko support will also require further design thought; atmosphere's debugging design heavily relies on reboot-to-payload and (more generally) the ability to perform warmboot bootrom hax at will. This is not possible on Mariko, and will require a new design/software support for whatever solution is chosen. * **Development Status**: Planned. * **Estimated Time**: Summer 2020 @@ -20,7 +15,7 @@ The following descriptions were last updated on June 15th, 2020. ## mesosphere * **Description**: mesosphère is a reimplementation of the Horizon operating system's Kernel. It aims to provide an open-source reference for Nintendo's code. -* **Development Status**: Under semi-active development by SciresM; temporarily on pause while the System Updater API is completed. +* **Development Status**: Under active development by SciresM. * **Estimated Time**: Mid-to-Late 2020 ## tma reimplementation @@ -47,3 +42,21 @@ The following descriptions were last updated on June 15th, 2020. * **Description**: General system stability improvements to enhance the user's experience. * **Development Status**: Undergoing active development by all members of the atmosphère team. * **Estimated Time**: June 15th. + +# Completed features + +The following features were previously included under the planned features section and are now complete. + +Please note that this is not an exhaustive list of features present in atmosphère, and only serves to indicate what from the above has been completed. + +## system updater homebrew +* **Description**: A user homebrew making use of the new system updater api, so that users can actually use the new api in practice. +* **Completion Time**: July 2020 + +## system updater api +* **Description**: A planned extension api for stratosphere (tenatively `ams:su`), this will provide an interface for homebrew to safely install system upgrades or downgrades. This will allow for much more easily transitioning safely between different versions of the operating system. +* **Completion Time**: June 2020 + +## exosphere re-write +* **Description**: exosphère, atmosphère's reimplementation of Horizon's Secure Monitor, was the first component authored for the project in early 2018. It is written in C, and in a style very different from the rest of atmosphère's code. In addition, exosphère was written to conform to constraints that no longer apply in an environment where it is not launched from the web browser, and where using a custom firmware image to orchestrate wake-from-sleep is possible. exosphère currently uses all but 1 KB of the space available to it, putting it at risk of breaking as future firmware updates are supported. A re-write will solve these issues. 
+* **Completion Time**: June 2020 \ No newline at end of file diff --git a/exosphere/program/source/boot/secmon_boot_setup.cpp b/exosphere/program/source/boot/secmon_boot_setup.cpp index 2f7217f25..9452d70fd 100644 --- a/exosphere/program/source/boot/secmon_boot_setup.cpp +++ b/exosphere/program/source/boot/secmon_boot_setup.cpp @@ -48,7 +48,12 @@ namespace ams::secmon::boot { const auto pmc = MemoryRegionVirtualDevicePmc.GetAddress(); /* Set the physical address of the warmboot binary to scratch 1. */ - reg::Write(pmc + APBDEV_PMC_SCRATCH1, static_cast(MemoryRegionPhysicalDramSecureDataStoreWarmbootFirmware.GetAddress())); + if (GetSocType() == fuse::SocType_Mariko) { + reg::Write(pmc + APBDEV_PMC_SECURE_SCRATCH119, static_cast(MemoryRegionPhysicalDramSecureDataStoreWarmbootFirmware.GetAddress())); + } else /* if (GetSocType() == fuse::SocType_Erista) */ { + reg::Write(pmc + APBDEV_PMC_SCRATCH1, static_cast(MemoryRegionPhysicalDramSecureDataStoreWarmbootFirmware.GetAddress())); + } + /* Configure logging by setting bits 18-19 of scratch 20. */ reg::ReadWrite(pmc + APBDEV_PMC_SCRATCH20, REG_BITS_VALUE(18, 2, 0)); @@ -66,14 +71,74 @@ namespace ams::secmon::boot { /* The warmboot key as a parameter. The latter is a better solution, but it would be nice to take */ /* care of it here. Perhaps we should read the number of anti-downgrade fuses burnt, and translate that */ /* to the warmboot key? To be decided during the process of implementing ams-on-mariko support. */ + reg::Write(pmc + APBDEV_PMC_SECURE_SCRATCH32, 0x129); } + constinit const u8 DeviceMasterKeySourceKekSource[se::AesBlockSize] = { + 0x0C, 0x91, 0x09, 0xDB, 0x93, 0x93, 0x07, 0x81, 0x07, 0x3C, 0xC4, 0x16, 0x22, 0x7C, 0x6C, 0x28 + }; + /* This function derives the master kek and device keys using the tsec root key. */ - /* NOTE: Exosphere does not use this in practice, and expects the bootloader to set up keys already. */ - /* NOTE: This function is currently not implemented. If implemented, it will only be a reference implementation. */ - [[maybe_unused]] - void DeriveMasterKekAndDeviceKey() { - /* TODO: Decide whether to implement this. */ + void DeriveMasterKekAndDeviceKeyErista(bool is_prod) { + /* NOTE: Exosphere does not use this in practice, and expects the bootloader to set up keys already. */ + /* NOTE: This function is currently not implemented. If implemented, it will only be a reference implementation. */ + if constexpr (false) { + /* TODO: Consider implementing this as a reference. */ + } + } + + /* NOTE: These are just latest-master-kek encrypted with BEK. */ + /* We can get away with only including latest because exosphere supports newer-than-expected master key in engine. */ + /* TODO: Update on next change of keys. */ + constinit const u8 MarikoMasterKekSourceProd[se::AesBlockSize] = { + 0x0E, 0x44, 0x0C, 0xED, 0xB4, 0x36, 0xC0, 0x3F, 0xAA, 0x1D, 0xAE, 0xBF, 0x62, 0xB1, 0x09, 0x82 + }; + + constinit const u8 MarikoMasterKekSourceDev[se::AesBlockSize] = { + 0xF9, 0x37, 0xCF, 0x9A, 0xBD, 0x86, 0xBB, 0xA9, 0x9C, 0x9E, 0x03, 0xC4, 0xFC, 0xBC, 0x3B, 0xCE + }; + + constinit const u8 MasterKeySource[se::AesBlockSize] = { + 0xD8, 0xA2, 0x41, 0x0A, 0xC6, 0xC5, 0x90, 0x01, 0xC6, 0x1D, 0x6A, 0x26, 0x7C, 0x51, 0x3F, 0x3C + }; + + void DeriveMasterKekAndDeviceKeyMariko(bool is_prod) { + /* Clear all keyslots other than KEK and SBK in SE1. 
*/ + for (int i = 0; i < pkg1::AesKeySlot_Count; ++i) { + if (i != pkg1::AesKeySlot_MarikoKek && i != pkg1::AesKeySlot_SecureBoot) { + se::ClearAesKeySlot(i); + } + } + + /* Clear all keyslots in SE2. */ + for (int i = 0; i < pkg1::AesKeySlot_Count; ++i) { + se::ClearAesKeySlot2(i); + } + + /* Derive the master kek. */ + se::SetEncryptedAesKey128(pkg1::AesKeySlot_MasterKek, pkg1::AesKeySlot_MarikoKek, is_prod ? MarikoMasterKekSourceProd : MarikoMasterKekSourceDev, se::AesBlockSize); + + /* Derive the device master key source kek. */ + se::SetEncryptedAesKey128(pkg1::AesKeySlot_DeviceMasterKeySourceKekMariko, pkg1::AesKeySlot_SecureBoot, DeviceMasterKeySourceKekSource, se::AesBlockSize); + + /* Clear the KEK, now that we're done using it. */ + se::ClearAesKeySlot(pkg1::AesKeySlot_MarikoKek); + } + + void DeriveMasterKekAndDeviceKey(bool is_prod) { + if (GetSocType() == fuse::SocType_Mariko) { + DeriveMasterKekAndDeviceKeyMariko(is_prod); + } else /* if (GetSocType() == fuse::SocType_Erista) */ { + DeriveMasterKekAndDeviceKeyErista(is_prod); + } + } + + void DeriveMasterKey() { + if (GetSocType() == fuse::SocType_Mariko) { + se::SetEncryptedAesKey128(pkg1::AesKeySlot_Master, pkg1::AesKeySlot_MasterKek, MasterKeySource, se::AesBlockSize); + } else /* if (GetSocType() == fuse::SocType_Erista) */ { + /* Nothing to do here; erista bootloader will have derived master key already. */ + } } void SetupRandomKey(int slot, se::KeySlotLockFlags flags) { @@ -218,6 +283,9 @@ namespace ams::secmon::boot { /* Get the current key generation. */ const int current_generation = secmon::GetKeyGeneration(); + /* Get the kek slot. */ + const int kek_slot = GetSocType() == fuse::SocType_Mariko ? pkg1::AesKeySlot_DeviceMasterKeySourceKekMariko : pkg1::AesKeySlot_DeviceMasterKeySourceKekErista; + /* Iterate for all generations. */ for (int i = 0; i < pkg1::OldDeviceMasterKeyCount; ++i) { const int generation = pkg1::KeyGeneration_4_0_0 + i; @@ -229,7 +297,7 @@ namespace ams::secmon::boot { se::SetEncryptedAesKey128(pkg1::AesKeySlot_Temporary, pkg1::AesKeySlot_Temporary, is_prod ? DeviceMasterKekSourcesProd[i] : DeviceMasterKekSourcesDev[i], se::AesBlockSize); /* Decrypt the device master key source into the work block. */ - se::DecryptAes128(work_block, se::AesBlockSize, pkg1::AesKeySlot_DeviceMasterKeySourceKek, DeviceMasterKeySourceSources[i], se::AesBlockSize); + se::DecryptAes128(work_block, se::AesBlockSize, kek_slot, DeviceMasterKeySourceSources[i], se::AesBlockSize); /* If we're decrypting the current device master key, decrypt into the keyslot. */ if (generation == current_generation) { @@ -244,14 +312,11 @@ namespace ams::secmon::boot { } /* Clear and lock the Device Master Key Source Kek. */ - se::ClearAesKeySlot(pkg1::AesKeySlot_DeviceMasterKeySourceKek); - se::LockAesKeySlot(pkg1::AesKeySlot_DeviceMasterKeySourceKek, se::KeySlotLockFlags_AllLockKek); + se::ClearAesKeySlot(pkg1::AesKeySlot_DeviceMasterKeySourceKekMariko); + se::LockAesKeySlot(pkg1::AesKeySlot_DeviceMasterKeySourceKekMariko, se::KeySlotLockFlags_AllLockKek); } - void DeriveAllKeys() { - /* Determine whether we're prod. */ - const bool is_prod = IsProduction(); - + void DeriveAllKeys(bool is_prod) { /* Get the ephemeral work block. */ u8 * const work_block = se::GetEphemeralWorkBlock(); ON_SCOPE_EXIT { util::ClearMemory(work_block, se::AesBlockSize); }; @@ -265,6 +330,9 @@ namespace ams::secmon::boot { /* Derive the master keys. */ DeriveAllMasterKeys(is_prod, work_block); + /* Lock the master key as a kek. 
*/ + se::LockAesKeySlot(pkg1::AesKeySlot_Master, se::KeySlotLockFlags_AllLockKek); + /* Derive the device master keys. */ DeriveAllDeviceMasterKeys(is_prod, work_block); @@ -300,16 +368,21 @@ namespace ams::secmon::boot { /* Initialize the rng. */ se::InitializeRandom(); + /* Determine whether we're production. */ + const bool is_prod = IsProduction(); + /* Derive the master kek and device key. */ - if constexpr (false) { - DeriveMasterKekAndDeviceKey(); - } + /* NOTE: This is a no-op on erista, because fusee will have set up keys. */ + DeriveMasterKekAndDeviceKey(is_prod); /* Lock the device key as only usable as a kek. */ se::LockAesKeySlot(pkg1::AesKeySlot_Device, se::KeySlotLockFlags_AllLockKek); - /* Derive all keys. */ - DeriveAllKeys(); + /* Derive the master key. */ + DeriveMasterKey(); + + /* Derive all other keys. */ + DeriveAllKeys(is_prod); } } @@ -348,6 +421,9 @@ namespace ams::secmon::boot { /* Set the security engine to Per Key Secure. */ se::SetPerKeySecure(); + /* Set the security engine to Context Save Secure. */ + se::SetContextSaveSecure(); + /* Setup the PMC registers. */ SetupPmcRegisters(); diff --git a/exosphere/program/source/boot/secmon_main.cpp b/exosphere/program/source/boot/secmon_main.cpp index db1e1e090..73f02e9ee 100644 --- a/exosphere/program/source/boot/secmon_main.cpp +++ b/exosphere/program/source/boot/secmon_main.cpp @@ -42,7 +42,7 @@ namespace ams::secmon { i2c::SetRegisterAddress(i2c::Port_5, MemoryRegionVirtualDeviceI2c5.GetAddress()); pinmux::SetRegisterAddress(MemoryRegionVirtualDeviceApbMisc.GetAddress(), MemoryRegionVirtualDeviceGpio.GetAddress()); pmc::SetRegisterAddress(MemoryRegionVirtualDevicePmc.GetAddress()); - se::SetRegisterAddress(MemoryRegionVirtualDeviceSecurityEngine.GetAddress()); + se::SetRegisterAddress(MemoryRegionVirtualDeviceSecurityEngine.GetAddress(), MemoryRegionVirtualDeviceSecurityEngine2.GetAddress()); uart::SetRegisterAddress(MemoryRegionVirtualDeviceUart.GetAddress()); wdt::SetRegisterAddress(MemoryRegionVirtualDeviceTimer.GetAddress()); util::SetRegisterAddress(MemoryRegionVirtualDeviceTimer.GetAddress()); diff --git a/exosphere/program/source/secmon_cpu_context.cpp b/exosphere/program/source/secmon_cpu_context.cpp index d2185a699..f35a68d36 100644 --- a/exosphere/program/source/secmon_cpu_context.cpp +++ b/exosphere/program/source/secmon_cpu_context.cpp @@ -33,8 +33,6 @@ namespace ams::secmon { u32 mdcr_el2; u32 mdcr_el3; u32 spsr_el3; - u64 dbgbvcr_el1[12]; - u64 dbgwvcr_el1[ 8]; }; struct CoreContext { @@ -61,30 +59,6 @@ namespace ams::secmon { HW_CPU_GET_MDCR_EL2 (dr.mdcr_el2); HW_CPU_GET_MDCR_EL3 (dr.mdcr_el3); HW_CPU_GET_SPSR_EL3 (dr.spsr_el3); - - /* Save debug breakpoints. */ - HW_CPU_GET_DBGBVR0_EL1(dr.dbgbvcr_el1[ 0]); - HW_CPU_GET_DBGBCR0_EL1(dr.dbgbvcr_el1[ 1]); - HW_CPU_GET_DBGBVR1_EL1(dr.dbgbvcr_el1[ 2]); - HW_CPU_GET_DBGBCR1_EL1(dr.dbgbvcr_el1[ 3]); - HW_CPU_GET_DBGBVR2_EL1(dr.dbgbvcr_el1[ 4]); - HW_CPU_GET_DBGBCR2_EL1(dr.dbgbvcr_el1[ 5]); - HW_CPU_GET_DBGBVR3_EL1(dr.dbgbvcr_el1[ 6]); - HW_CPU_GET_DBGBCR3_EL1(dr.dbgbvcr_el1[ 7]); - HW_CPU_GET_DBGBVR4_EL1(dr.dbgbvcr_el1[ 8]); - HW_CPU_GET_DBGBCR4_EL1(dr.dbgbvcr_el1[ 9]); - HW_CPU_GET_DBGBVR5_EL1(dr.dbgbvcr_el1[10]); - HW_CPU_GET_DBGBCR5_EL1(dr.dbgbvcr_el1[11]); - - /* Save debug watchpoints. 
*/ - HW_CPU_GET_DBGWVR0_EL1(dr.dbgwvcr_el1[0]); - HW_CPU_GET_DBGWCR0_EL1(dr.dbgwvcr_el1[1]); - HW_CPU_GET_DBGWVR1_EL1(dr.dbgwvcr_el1[2]); - HW_CPU_GET_DBGWCR1_EL1(dr.dbgwvcr_el1[3]); - HW_CPU_GET_DBGWVR2_EL1(dr.dbgwvcr_el1[4]); - HW_CPU_GET_DBGWCR2_EL1(dr.dbgwvcr_el1[5]); - HW_CPU_GET_DBGWVR3_EL1(dr.dbgwvcr_el1[6]); - HW_CPU_GET_DBGWCR3_EL1(dr.dbgwvcr_el1[7]); } void RestoreDebugRegisters(const DebugRegisters &dr) { @@ -100,30 +74,6 @@ namespace ams::secmon { HW_CPU_SET_MDCR_EL2 (dr.mdcr_el2); HW_CPU_SET_MDCR_EL3 (dr.mdcr_el3); HW_CPU_SET_SPSR_EL3 (dr.spsr_el3); - - /* Restore debug breakpoints. */ - HW_CPU_SET_DBGBVR0_EL1(dr.dbgbvcr_el1[ 0]); - HW_CPU_SET_DBGBCR0_EL1(dr.dbgbvcr_el1[ 1]); - HW_CPU_SET_DBGBVR1_EL1(dr.dbgbvcr_el1[ 2]); - HW_CPU_SET_DBGBCR1_EL1(dr.dbgbvcr_el1[ 3]); - HW_CPU_SET_DBGBVR2_EL1(dr.dbgbvcr_el1[ 4]); - HW_CPU_SET_DBGBCR2_EL1(dr.dbgbvcr_el1[ 5]); - HW_CPU_SET_DBGBVR3_EL1(dr.dbgbvcr_el1[ 6]); - HW_CPU_SET_DBGBCR3_EL1(dr.dbgbvcr_el1[ 7]); - HW_CPU_SET_DBGBVR4_EL1(dr.dbgbvcr_el1[ 8]); - HW_CPU_SET_DBGBCR4_EL1(dr.dbgbvcr_el1[ 9]); - HW_CPU_SET_DBGBVR5_EL1(dr.dbgbvcr_el1[10]); - HW_CPU_SET_DBGBCR5_EL1(dr.dbgbvcr_el1[11]); - - /* Restore debug watchpoints. */ - HW_CPU_SET_DBGWVR0_EL1(dr.dbgwvcr_el1[0]); - HW_CPU_SET_DBGWCR0_EL1(dr.dbgwvcr_el1[1]); - HW_CPU_SET_DBGWVR1_EL1(dr.dbgwvcr_el1[2]); - HW_CPU_SET_DBGWCR1_EL1(dr.dbgwvcr_el1[3]); - HW_CPU_SET_DBGWVR2_EL1(dr.dbgwvcr_el1[4]); - HW_CPU_SET_DBGWCR2_EL1(dr.dbgwvcr_el1[5]); - HW_CPU_SET_DBGWVR3_EL1(dr.dbgwvcr_el1[6]); - HW_CPU_SET_DBGWCR3_EL1(dr.dbgwvcr_el1[7]); } constinit CoreContext g_core_contexts[NumCores] = {}; diff --git a/exosphere/program/source/secmon_error.cpp b/exosphere/program/source/secmon_error.cpp index 5b576294b..4c7d7752c 100644 --- a/exosphere/program/source/secmon_error.cpp +++ b/exosphere/program/source/secmon_error.cpp @@ -16,10 +16,24 @@ #include #include "secmon_error.hpp" -namespace { +namespace ams { - constexpr bool SaveSystemStateForDebug = false; + namespace { + constexpr bool SaveSystemStateForDebug = false; + constexpr bool LogSystemStateForDebug = false; + + void LogU64(u64 value) { + char buffer[2 * sizeof(value)]; + for (size_t i = 0; i < sizeof(value); ++i) { + buffer[sizeof(buffer) - 1 - (2 * i) - 0] = "0123456789ABCDEF"[(value >> 0) & 0xF]; + buffer[sizeof(buffer) - 1 - (2 * i) - 1] = "0123456789ABCDEF"[(value >> 4) & 0xF]; + value >>= 8; + } + log::SendText(buffer, sizeof(buffer)); + } + + } } namespace ams::diag { @@ -98,6 +112,57 @@ namespace ams::secmon { util::WaitMicroSeconds(1000); } + ALWAYS_INLINE void LogSystemStateForDebugErrorReboot(u64 lr, u64 sp) { + log::SendText("*** Error Reboot ***\n", 21); + log::Flush(); + + u64 temp_reg; + + __asm__ __volatile__("mrs %0, esr_el3" : "=r"(temp_reg) :: "memory"); + log::SendText("ESR_EL3: ", 9); + LogU64(temp_reg); + log::SendText("\n", 1); + log::Flush(); + + __asm__ __volatile__("mrs %0, elr_el3" : "=r"(temp_reg) :: "memory"); + log::SendText("ELR_EL3: ", 9); + LogU64(temp_reg); + log::SendText("\n", 1); + log::Flush(); + + __asm__ __volatile__("mrs %0, far_el3" : "=r"(temp_reg) :: "memory"); + log::SendText("FAR_EL3: ", 9); + LogU64(temp_reg); + log::SendText("\n", 1); + log::Flush(); + + log::SendText("LR: ", 9); + LogU64(lr); + log::SendText("\n", 1); + log::Flush(); + + log::SendText("SP: ", 9); + LogU64(sp); + log::SendText("\n", 1); + log::Flush(); + + log::SendText("Stack:\n", 7); + log::Flush(); + + char buf[2]; + for (int i = 0; i < 0x100; ++i) { + const u8 byte = *(volatile u8 *)(sp + i); + buf[0] = 
"0123456789ABCDEF"[(byte >> 4) & 0xF]; + buf[1] = "0123456789ABCDEF"[(byte >> 0) & 0xF]; + log::SendText(buf, 2); + log::Flush(); + if (util::IsAligned(i + 1, 0x10)) { + log::SendText("\n", 1); + log::Flush(); + } + } + } + } void SetError(pkg1::ErrorInfo info) { @@ -114,6 +179,14 @@ namespace ams::secmon { SaveSystemStateForDebugErrorReboot(); } + if constexpr (LogSystemStateForDebug) { + u64 lr, sp; + __asm__ __volatile__("mov %0, lr" : "=r"(lr) :: "memory"); + __asm__ __volatile__("mov %0, sp" : "=r"(sp) :: "memory"); + + LogSystemStateForDebugErrorReboot(lr, sp); + } + /* Lockout the security engine. */ se::Lockout(); diff --git a/exosphere/program/source/secmon_setup.cpp b/exosphere/program/source/secmon_setup.cpp index a87539e72..b5580c1b2 100644 --- a/exosphere/program/source/secmon_setup.cpp +++ b/exosphere/program/source/secmon_setup.cpp @@ -51,27 +51,27 @@ namespace ams::secmon { constinit bool g_is_cold_boot = true; - constinit const se::StickyBits ExpectedSeStickyBits = { + constinit se::StickyBits ExpectedSeStickyBits = { .se_security = (1 << 0), /* SE_HARD_SETTING */ .tzram_security = 0, .crypto_security_perkey = (1 << pkg1::AesKeySlot_UserEnd) - 1, .crypto_keytable_access = { - (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 0: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ - (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 1: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ - (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 2: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ - (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 3: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ - (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 4: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ - (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 5: User keyslot. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 6: Unused keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 7: Unused keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ - (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 8: Temp keyslot. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */ - (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 9: SmcTemp keyslot. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 10: Wrap1 keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 11: Wrap2 keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 12: DMaster keyslot. 
KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 13: Master keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 14: Unused keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ - (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 13: Device keyslot. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (0 << 7) | (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 0: User keyslot. KEY. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ + (0 << 7) | (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 1: User keyslot. KEY. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ + (0 << 7) | (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 2: User keyslot. KEY. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ + (0 << 7) | (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 3: User keyslot. KEY. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ + (0 << 7) | (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 4: User keyslot. KEY. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ + (0 << 7) | (1 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 5: User keyslot. KEY. KEYUSE, UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. UIVREAD, OIVREAD, KEYREAD disabled. */ + (1 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 6: Unused keyslot. KEK. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (1 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 7: Unused keyslot. KEK. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (0 << 7) | (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 8: Temp keyslot. KEY. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */ + (0 << 7) | (0 << 6) | (1 << 5) | (0 << 4) | (1 << 3) | (0 << 2) | (1 << 1) | (0 << 0), /* 9: SmcTemp keyslot. KEY. UIVUPDATE, OIVUPDATE, KEYUPDATE enabled. KEYUSE, UIVREAD, OIVREAD, KEYREAD disabled. */ + (1 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 10: Wrap1 keyslot. KEK. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (0 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 11: Wrap2 keyslot. KEY. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (1 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 12: DMaster keyslot. KEK. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (1 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 13: Master keyslot. KEK. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (1 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 14: Unused keyslot. KEK. 
KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ + (1 << 7) | (0 << 6) | (0 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (0 << 1) | (0 << 0), /* 13: Device keyslot. KEK. KEYUSE, UIVUPDATE, UIVREAD, OIVUPDATE, OIVREAD, KEYUPDATE, KEYREAD disabled. */ }, .rsa_security_perkey = 0, .rsa_keytable_access = { @@ -139,6 +139,16 @@ namespace ams::secmon { } void VerifySecurityEngineStickyBits() { + /* On mariko, an extra sticky bit is set. */ + if (GetSocType() == fuse::SocType_Mariko) { + ExpectedSeStickyBits.se_security |= (1 << 5); + } else /* if (GetSocType() == fuse::SocType_Erista) */ { + /* Erista does not support DST_KEYTABLE_ONLY, and so all keys will have the bit clear. */ + for (size_t i = 0; i < util::size(ExpectedSeStickyBits.crypto_keytable_access); ++i) { + ExpectedSeStickyBits.crypto_keytable_access[i] &= ~(1 << 7); + } + } + if (!se::ValidateStickyBits(ExpectedSeStickyBits)) { SetError(pkg1::ErrorInfo_InvalidSecurityEngineStickyBits); AMS_ABORT("Invalid sticky bits"); @@ -938,12 +948,16 @@ namespace ams::secmon { return reg::Read(MC + MC_SECURITY_CFG3) == 0; } + void SetupLogForBoot() { + log::Initialize(); + log::SendText("OHAYO\n", 6); + log::Flush(); + } + void LogExitLp0() { /* NOTE: Nintendo only does this on dev, but we will always do it. */ if (true /* !pkg1::IsProduction() */) { - log::Initialize(); - log::SendText("OHAYO\n", 6); - log::Flush(); + SetupLogForBoot(); } } @@ -969,7 +983,7 @@ namespace ams::secmon { InitializeConfigurationContext(); /* Initialize uart for logging. */ - log::Initialize(); + SetupLogForBoot(); /* Initialize the security engine. */ se::Initialize(); @@ -1017,12 +1031,16 @@ namespace ams::secmon { /* Overwrite keys that we want to be random with random contents. */ se::InitializeRandom(); + se::ConfigureAutomaticContextSave(); se::SetRandomKey(pkg1::AesKeySlot_Temporary); se::GenerateSrk(); se::SetRandomKey(pkg1::AesKeySlot_TzramSaveKek); /* Initialize pmc secure scratch. */ - pmc::InitializeRandomScratch(); + if (GetSocType() == fuse::SocType_Erista) { + pmc::InitializeRandomScratch(); + } + pmc::LockSecureRegister(pmc::SecureRegister_Srk); /* Setup secure registers. */ SetupSecureRegisters(); diff --git a/exosphere/program/source/secmon_setup_warm.cpp b/exosphere/program/source/secmon_setup_warm.cpp index 331502e02..a0f1bbe38 100644 --- a/exosphere/program/source/secmon_setup_warm.cpp +++ b/exosphere/program/source/secmon_setup_warm.cpp @@ -264,6 +264,13 @@ namespace ams::secmon { { reg::Write(AHB_ARBC(AHB_GIZMO_TZRAM), (1u << 7)); } + + /* NOTE: This is Mariko only in Nintendo's firmware. */ + /* Still, it seems to have no adverse effects on Erista... */ + /* TODO: Find a way to get access to SocType this early (fuse driver isn't alive yet), only write on mariko? 
*/ + { + reg::ReadWrite(AHB_ARBC(AHB_AHB_SPARE_REG), AHB_REG_BITS_VALUE(AHB_SPARE_REG_AHB_SPARE_REG, 0xE0000)); + } } void SetupSocDmaControllersCpuMemoryControllersEnableMmuWarmboot() { diff --git a/exosphere/program/source/smc/secmon_smc_aes.cpp b/exosphere/program/source/smc/secmon_smc_aes.cpp index 60912e78a..158f249ae 100644 --- a/exosphere/program/source/smc/secmon_smc_aes.cpp +++ b/exosphere/program/source/smc/secmon_smc_aes.cpp @@ -257,7 +257,7 @@ namespace ams::secmon::smc { } int PrepareDeviceMasterKey(int generation) { - if (generation == pkg1::KeyGeneration_1_0_0) { + if (generation == pkg1::KeyGeneration_1_0_0 && GetSocType() == fuse::SocType_Erista) { return pkg1::AesKeySlot_Device; } if (generation == GetKeyGeneration()) { diff --git a/exosphere/program/source/smc/secmon_smc_info.cpp b/exosphere/program/source/smc/secmon_smc_info.cpp index 7cf50269d..d06b8a509 100644 --- a/exosphere/program/source/smc/secmon_smc_info.cpp +++ b/exosphere/program/source/smc/secmon_smc_info.cpp @@ -262,6 +262,11 @@ namespace ams::secmon::smc { /* Get whether this unit should allow writing to the calibration partition. */ args.r[1] = (GetEmummcConfiguration().IsEmummcActive() || GetSecmonConfiguration().AllowWritingToCalibrationBinarySysmmc()); break; + case ConfigItem::ExosphereEmummcType: + /* Get what kind of emummc this unit has active. */ + /* NOTE: This may return values other than 1 in the future. */ + args.r[1] = (GetEmummcConfiguration().IsEmummcActive() ? 1 : 0); + break; default: return SmcResult::InvalidArgument; } diff --git a/exosphere/program/source/smc/secmon_smc_info.hpp b/exosphere/program/source/smc/secmon_smc_info.hpp index d45bf57d2..55846631e 100644 --- a/exosphere/program/source/smc/secmon_smc_info.hpp +++ b/exosphere/program/source/smc/secmon_smc_info.hpp @@ -47,6 +47,7 @@ namespace ams::secmon::smc { ExosphereHasRcmBugPatch = 65004, ExosphereBlankProdInfo = 65005, ExosphereAllowCalWrites = 65006, + ExosphereEmummcType = 65007, }; SmcResult SmcGetConfigUser(SmcArguments &args); diff --git a/exosphere/program/source/smc/secmon_smc_power_management.cpp b/exosphere/program/source/smc/secmon_smc_power_management.cpp index 4af64ee37..1e8a90389 100644 --- a/exosphere/program/source/smc/secmon_smc_power_management.cpp +++ b/exosphere/program/source/smc/secmon_smc_power_management.cpp @@ -325,7 +325,11 @@ namespace ams::secmon::smc { } void SaveSecureContextForMariko() { - /* TODO: Implement this when adding ams-on-mariko support. */ + /* Save security engine context to TZRAM SE carveout (inaccessible to cpu). */ + se::SaveContextAutomatic(); + + /* Save TZRAM to shadow-TZRAM in always-on power domain. 
*/ + se::SaveTzramAutomatic(); } void SaveSecureContext() { diff --git a/exosphere/program/split_program.py b/exosphere/program/split_program.py index b52406eba..e8806c16b 100644 --- a/exosphere/program/split_program.py +++ b/exosphere/program/split_program.py @@ -3,7 +3,11 @@ import sys, lz4 from struct import unpack as up def lz4_compress(data): - return lz4.block.compress(data, 'high_compression', store_size=False) + try: + import lz4.block as block + except ImportError: + block = lz4.LZ4_compress + return block.compress(data, 'high_compression', store_size=False) def split_binary(data): A, B, START, BOOT_CODE_START, BOOT_CODE_END, PROGRAM_START, C, D = up(' diff --git a/fusee/fusee-mtc/src/utils.c b/fusee/fusee-mtc/src/utils.c index d06fe6e91..95577462e 100644 --- a/fusee/fusee-mtc/src/utils.c +++ b/fusee/fusee-mtc/src/utils.c @@ -17,17 +17,26 @@ #include #include #include "utils.h" +#include "display/video_fb.h" #include "lib/log.h" __attribute__ ((noreturn)) void generic_panic(void) { - print(SCREEN_LOG_LEVEL_ERROR, "Panic raised!"); - while (true) { /* Lock. */ } } __attribute__((noreturn)) void fatal_error(const char *fmt, ...) { + /* Forcefully initialize the screen if logging is disabled. */ + if (log_get_log_level() == SCREEN_LOG_LEVEL_NONE) { + /* Zero-fill the framebuffer and register it as printk provider. */ + video_init((void *)0xC0000000); + + /* Override the global logging level. */ + log_set_log_level(SCREEN_LOG_LEVEL_ERROR); + } + + /* Display fatal error. */ va_list args; print(SCREEN_LOG_LEVEL_ERROR, "Fatal error: "); va_start(args, fmt); diff --git a/fusee/fusee-primary/src/exception_handlers.c b/fusee/fusee-primary/src/exception_handlers.c index 821f8cb2d..128425891 100644 --- a/fusee/fusee-primary/src/exception_handlers.c +++ b/fusee/fusee-primary/src/exception_handlers.c @@ -19,9 +19,10 @@ #include "exception_handlers.h" #include "utils.h" #include "lib/log.h" +#include "lib/vsprintf.h" #define CODE_DUMP_SIZE 0x30 -#define STACK_DUMP_SIZE 0x60 +#define STACK_DUMP_SIZE 0x30 extern const uint32_t exception_handler_table[]; @@ -34,6 +35,40 @@ static const char *register_names[] = { "SP", "LR", "PC", "CPSR", }; +/* Adapted from https://gist.github.com/ccbrown/9722406 */ +static void hexdump(const void* data, size_t size, uintptr_t addrbase, char* strbuf) { + const uint8_t *d = (const uint8_t *)data; + char ascii[17] = {0}; + ascii[16] = '\0'; + + for (size_t i = 0; i < size; i++) { + if (i % 16 == 0) { + strbuf += sprintf(strbuf, "%0*" PRIXPTR ": | ", 2 * sizeof(addrbase), addrbase + i); + } + strbuf += sprintf(strbuf, "%02X ", d[i]); + if (d[i] >= ' ' && d[i] <= '~') { + ascii[i % 16] = d[i]; + } else { + ascii[i % 16] = '.'; + } + if ((i+1) % 8 == 0 || i+1 == size) { + strbuf += sprintf(strbuf, " "); + if ((i+1) % 16 == 0) { + strbuf += sprintf(strbuf, "| %s \n", ascii); + } else if (i+1 == size) { + ascii[(i+1) % 16] = '\0'; + if ((i+1) % 16 <= 8) { + strbuf += sprintf(strbuf, " "); + } + for (size_t j = (i+1) % 16; j < 16; j++) { + strbuf += sprintf(strbuf, " "); + } + strbuf += sprintf(strbuf, "| %s \n", ascii); + } + } + } +} + void setup_exception_handlers(void) { volatile uint32_t *bpmp_exception_handler_table = (volatile uint32_t *)0x6000F200; for (int i = 0; i < 8; i++) { @@ -44,38 +79,40 @@ void setup_exception_handlers(void) { } void exception_handler_main(uint32_t *registers, unsigned int exception_type) { - uint8_t code_dump[CODE_DUMP_SIZE]; - uint8_t stack_dump[STACK_DUMP_SIZE]; - size_t code_dump_size; - size_t stack_dump_size; + char 
exception_log[0x400] = {0}; + uint8_t code_dump[CODE_DUMP_SIZE] = {0}; + uint8_t stack_dump[STACK_DUMP_SIZE] = {0}; + size_t code_dump_size = 0; + size_t stack_dump_size = 0; uint32_t pc = registers[15]; uint32_t cpsr = registers[16]; - uint32_t instr_addr = pc + ((cpsr & 0x20) ? 2 : 4) - CODE_DUMP_SIZE; - print(SCREEN_LOG_LEVEL_ERROR, "\nSomething went wrong...\n"); - + sprintf(exception_log, "An exception occured!\n"); + code_dump_size = safecpy(code_dump, (const void *)instr_addr, CODE_DUMP_SIZE); stack_dump_size = safecpy(stack_dump, (const void *)registers[13], STACK_DUMP_SIZE); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nException type: %s\n", - exception_names[exception_type]); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nRegisters:\n\n"); + sprintf(exception_log + strlen(exception_log), "\nException type: %s\n", exception_names[exception_type]); + sprintf(exception_log + strlen(exception_log), "\nRegisters:\n"); /* Print r0 to pc. */ for (int i = 0; i < 16; i += 2) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%-7s%08"PRIX32" %-7s%08"PRIX32"\n", + sprintf(exception_log + strlen(exception_log), "%-7s%08"PRIX32" %-7s%08"PRIX32"\n", register_names[i], registers[i], register_names[i+1], registers[i+1]); } /* Print cpsr. */ - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%-7s%08"PRIX32"\n", register_names[16], registers[16]); - - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nCode dump:\n"); - hexdump(code_dump, code_dump_size, instr_addr); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nStack dump:\n"); - hexdump(stack_dump, stack_dump_size, registers[13]); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\n"); - fatal_error("An exception occured!\n"); + sprintf(exception_log + strlen(exception_log), "%-7s%08"PRIX32"\n", register_names[16], registers[16]); + + /* Print code and stack regions. */ + sprintf(exception_log + strlen(exception_log), "\nCode dump:\n"); + hexdump(code_dump, code_dump_size, instr_addr, exception_log + strlen(exception_log)); + sprintf(exception_log + strlen(exception_log), "\nStack dump:\n"); + hexdump(stack_dump, stack_dump_size, registers[13], exception_log + strlen(exception_log)); + sprintf(exception_log + strlen(exception_log), "\n"); + + /* Throw fatal error with the full exception log. 
*/ + fatal_error(exception_log); } diff --git a/fusee/fusee-primary/src/lib/log.h b/fusee/fusee-primary/src/lib/log.h index 8a28b0835..d1c5777f3 100644 --- a/fusee/fusee-primary/src/lib/log.h +++ b/fusee/fusee-primary/src/lib/log.h @@ -17,7 +17,7 @@ #ifndef FUSEE_LOG_H #define FUSEE_LOG_H -#define PRINT_MESSAGE_MAX_LENGTH 512 +#define PRINT_MESSAGE_MAX_LENGTH 1024 #include diff --git a/fusee/fusee-primary/src/panic.c b/fusee/fusee-primary/src/panic.c index e7b206f33..bd0520efb 100644 --- a/fusee/fusee-primary/src/panic.c +++ b/fusee/fusee-primary/src/panic.c @@ -42,6 +42,8 @@ static const char *get_error_desc_str(uint32_t error_desc) { return "SError"; case 0x301: return "Bad SVC"; + case 0xFFD: + return "Stack overflow"; case 0xFFE: return "std::abort() called"; default: @@ -90,12 +92,15 @@ static void _check_and_display_atmosphere_fatal_error(void) { char filepath[0x40]; snprintf(filepath, sizeof(filepath) - 1, "/atmosphere/fatal_errors/report_%016llx.bin", ctx.report_identifier); filepath[sizeof(filepath)-1] = 0; - write_to_file(&ctx, sizeof(ctx), filepath); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX,"Report saved to %s\n", filepath); + if (write_to_file(&ctx, sizeof(ctx), filepath) != sizeof(ctx)) { + print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "Failed to save report to the SD card!\n"); + } else { + print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "Report saved to %s\n", filepath); + } } /* Display error. */ - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX,"\nPress POWER to reboot\n"); + print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nPress POWER to reboot\n"); } /* Wait for button and reboot. */ diff --git a/fusee/fusee-primary/src/stage2.c b/fusee/fusee-primary/src/stage2.c index b267f1217..150fcb310 100644 --- a/fusee/fusee-primary/src/stage2.c +++ b/fusee/fusee-primary/src/stage2.c @@ -106,7 +106,7 @@ void load_stage2(const char *bct0) { } if (strlen(config.path) + 1 + sizeof(stage2_args_t) > CHAINLOADER_ARG_DATA_MAX_SIZE) { - print(SCREEN_LOG_LEVEL_ERROR, "Stage2's path name is too big!\n"); + fatal_error("Stage2's path name is too big!\n"); } if (!check_32bit_address_loadable(config.entrypoint)) { diff --git a/fusee/fusee-primary/src/utils.c b/fusee/fusee-primary/src/utils.c index 910e36c4e..51033eed7 100644 --- a/fusee/fusee-primary/src/utils.c +++ b/fusee/fusee-primary/src/utils.c @@ -26,6 +26,7 @@ #include "car.h" #include "btn.h" #include "lib/log.h" +#include "lib/vsprintf.h" #include "display/video_fb.h" #include @@ -112,11 +113,11 @@ __attribute__((noreturn)) void fatal_error(const char *fmt, ...) { /* Turn on the backlight after initializing the lfb */ /* to avoid flickering. */ display_backlight(true); - - /* Override the global logging level. */ - log_set_log_level(SCREEN_LOG_LEVEL_ERROR); } + /* Override the global logging level. */ + log_set_log_level(SCREEN_LOG_LEVEL_ERROR); + /* Display fatal error. 
*/ va_list args; print(SCREEN_LOG_LEVEL_ERROR, "Fatal error: "); @@ -137,37 +138,3 @@ __attribute__((noinline)) bool overlaps(uint64_t as, uint64_t ae, uint64_t bs, u return true; return false; } - -/* Adapted from https://gist.github.com/ccbrown/9722406 */ -void hexdump(const void* data, size_t size, uintptr_t addrbase) { - const uint8_t *d = (const uint8_t *)data; - char ascii[17]; - ascii[16] = '\0'; - - for (size_t i = 0; i < size; i++) { - if (i % 16 == 0) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%0*" PRIXPTR ": | ", 2 * sizeof(addrbase), addrbase + i); - } - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%02X ", d[i]); - if (d[i] >= ' ' && d[i] <= '~') { - ascii[i % 16] = d[i]; - } else { - ascii[i % 16] = '.'; - } - if ((i+1) % 8 == 0 || i+1 == size) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, " "); - if ((i+1) % 16 == 0) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "| %s \n", ascii); - } else if (i+1 == size) { - ascii[(i+1) % 16] = '\0'; - if ((i+1) % 16 <= 8) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, " "); - } - for (size_t j = (i+1) % 16; j < 16; j++) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, " "); - } - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "| %s \n", ascii); - } - } - } -} diff --git a/fusee/fusee-primary/src/utils.h b/fusee/fusee-primary/src/utils.h index 0b16ff6dc..d3eaea88e 100644 --- a/fusee/fusee-primary/src/utils.h +++ b/fusee/fusee-primary/src/utils.h @@ -117,15 +117,12 @@ static inline bool check_32bit_address_range_in_program(uintptr_t addr, size_t s overlaps_a(start, end, __start__, __end__); } -void hexdump(const void* data, size_t size, uintptr_t addrbase); - __attribute__((noreturn)) void watchdog_reboot(void); __attribute__((noreturn)) void pmc_reboot(uint32_t scratch0); __attribute__((noreturn)) void reboot_to_self(void); __attribute__((noreturn)) void wait_for_button_and_reboot(void); __attribute__((noreturn)) void generic_panic(void); - __attribute__((noreturn)) void fatal_error(const char *fmt, ...); #endif diff --git a/fusee/fusee-secondary/src/device_partition.c b/fusee/fusee-secondary/src/device_partition.c index 0a7330aca..0cbbc8e35 100644 --- a/fusee/fusee-secondary/src/device_partition.c +++ b/fusee/fusee-secondary/src/device_partition.c @@ -18,67 +18,9 @@ #include #include "device_partition.h" -int device_partition_read_data(device_partition_t *devpart, void *dst, uint64_t sector, uint64_t num_sectors) -{ - int rc; - if (!devpart->initialized) { - rc = devpart->initializer(devpart); - if (rc != 0) { - return rc; - } - } - if ((devpart->read_cipher != NULL) && (devpart->crypto_mode != DevicePartitionCryptoMode_None)) { - for (uint64_t i = 0; i < num_sectors; i += devpart->crypto_work_buffer_num_sectors) { - uint64_t n = (i + devpart->crypto_work_buffer_num_sectors > num_sectors) ? 
(num_sectors - i) : devpart->crypto_work_buffer_num_sectors; - rc = devpart->reader(devpart, devpart->crypto_work_buffer, sector + i, n); - if (rc != 0) { - return rc; - } - rc = devpart->read_cipher(devpart, sector + i, n); - if (rc != 0) { - return rc; - } - memcpy(dst + (size_t)(devpart->sector_size * i), devpart->crypto_work_buffer, (size_t)(devpart->sector_size * n)); - } - return 0; - } else { - return devpart->reader(devpart, dst, sector, num_sectors); - } -} - -int device_partition_write_data(device_partition_t *devpart, const void *src, uint64_t sector, uint64_t num_sectors) -{ - int rc; - if (!devpart->initialized) { - rc = devpart->initializer(devpart); - if (rc != 0) { - return rc; - } - } - if ((devpart->write_cipher != NULL) && (devpart->crypto_mode != DevicePartitionCryptoMode_None)) { - for (uint64_t i = 0; i < num_sectors; i += devpart->crypto_work_buffer_num_sectors) { - uint64_t n = (i + devpart->crypto_work_buffer_num_sectors > num_sectors) ? (num_sectors - i) : devpart->crypto_work_buffer_num_sectors; - memcpy(devpart->crypto_work_buffer, src + (size_t)(devpart->sector_size * i), (size_t)(devpart->sector_size * n)); - rc = devpart->write_cipher(devpart, sector + i, n); - if (rc != 0) { - return rc; - } - rc = devpart->writer(devpart, devpart->crypto_work_buffer, sector + i, n); - if (rc != 0) { - return rc; - } - } - return 0; - } else { - return devpart->writer(devpart, src, sector, num_sectors); - } -} - -int emu_device_partition_read_data(device_partition_t *devpart, void *dst, uint64_t sector, uint64_t num_sectors, const char *origin_path, int num_parts, uint64_t part_limit) -{ +int device_partition_read_data(device_partition_t *devpart, void *dst, uint64_t sector, uint64_t num_sectors) { int rc = 0; uint64_t target_sector = sector; - char target_path[0x300 + 1] = {0}; /* Perform initialization steps, if necessary. */ if (!devpart->initialized) { @@ -88,33 +30,37 @@ int emu_device_partition_read_data(device_partition_t *devpart, void *dst, uint6 } } - /* Prepare the right file path if using file mode. */ - if (devpart->emu_use_file && (origin_path != NULL)) { - /* Handle data in multiple parts, if necessary. */ - if (num_parts > 0) { - int target_part = 0; - uint64_t data_offset = sector * devpart->sector_size; + /* Handle emulation. */ + if (devpart->is_emulated) { + /* Prepare the right file path if using file mode. */ + if (devpart->emu_use_file) { + int num_parts = devpart->emu_num_parts; + uint64_t part_limit = devpart->emu_part_limit; - if (data_offset >= part_limit) { - uint64_t data_offset_aligned = (data_offset + (part_limit - 1)) & ~(part_limit - 1); - target_part = (data_offset_aligned == data_offset) ? (data_offset / part_limit) : (data_offset_aligned / part_limit) - 1; - target_sector = (data_offset - (target_part * part_limit)) / devpart->sector_size; - - /* Target part is invalid. */ - if (target_part > num_parts) { - return -1; + /* Handle data in multiple parts, if necessary. */ + if (num_parts > 0) { + int target_part = 0; + char target_path[0x300 + 1] = {0}; + uint64_t data_offset = sector * devpart->sector_size; + + if (data_offset >= part_limit) { + uint64_t data_offset_aligned = (data_offset + (part_limit - 1)) & ~(part_limit - 1); + target_part = (data_offset_aligned == data_offset) ? (data_offset / part_limit) : (data_offset_aligned / part_limit) - 1; + target_sector = (data_offset - (target_part * part_limit)) / devpart->sector_size; + + /* Target part is invalid. 
*/ + if (target_part > num_parts) { + return -1; + } } + + /* Treat the path as a folder with each part inside. */ + snprintf(target_path, sizeof(target_path) - 1, "%s/%02d", devpart->emu_root_path, target_part); + + /* Update the target file path. */ + strcpy(devpart->emu_file_path, target_path); } - - /* Treat the path as a folder with each part inside. */ - snprintf(target_path, sizeof(target_path) - 1, "%s/%02d", origin_path, target_part); - } else { - /* If there are no parts, copy the origin path directly. */ - strcpy(target_path, origin_path); } - - /* Update the target file path. */ - devpart->emu_file_path = target_path; } /* Read the partition data. */ @@ -147,11 +93,9 @@ int emu_device_partition_read_data(device_partition_t *devpart, void *dst, uint6 return rc; } -int emu_device_partition_write_data(device_partition_t *devpart, const void *src, uint64_t sector, uint64_t num_sectors, const char *origin_path, int num_parts, uint64_t part_limit) -{ +int device_partition_write_data(device_partition_t *devpart, const void *src, uint64_t sector, uint64_t num_sectors) { int rc = 0; uint64_t target_sector = sector; - char target_path[0x300 + 1] = {0}; /* Perform initialization steps, if necessary. */ if (!devpart->initialized) { @@ -161,33 +105,37 @@ int emu_device_partition_write_data(device_partition_t *devpart, const void *src } } - /* Prepare the right file path if using file mode. */ - if (devpart->emu_use_file && (origin_path != NULL)) { - /* Handle data in multiple parts, if necessary. */ - if (num_parts > 0) { - int target_part = 0; - uint64_t data_offset = sector * devpart->sector_size; + /* Handle emulation. */ + if (devpart->is_emulated) { + /* Prepare the right file path if using file mode. */ + if (devpart->emu_use_file) { + int num_parts = devpart->emu_num_parts; + uint64_t part_limit = devpart->emu_part_limit; - if (data_offset >= part_limit) { - uint64_t data_offset_aligned = (data_offset + (part_limit - 1)) & ~(part_limit - 1); - target_part = (data_offset_aligned == data_offset) ? (data_offset / part_limit) : (data_offset_aligned / part_limit) - 1; - target_sector = (data_offset - (target_part * part_limit)) / devpart->sector_size; - - /* Target part is invalid. */ - if (target_part > num_parts) { - return -1; + /* Handle data in multiple parts, if necessary. */ + if (num_parts > 0) { + int target_part = 0; + char target_path[0x300 + 1] = {0}; + uint64_t data_offset = sector * devpart->sector_size; + + if (data_offset >= part_limit) { + uint64_t data_offset_aligned = (data_offset + (part_limit - 1)) & ~(part_limit - 1); + target_part = (data_offset_aligned == data_offset) ? (data_offset / part_limit) : (data_offset_aligned / part_limit) - 1; + target_sector = (data_offset - (target_part * part_limit)) / devpart->sector_size; + + /* Target part is invalid. */ + if (target_part > num_parts) { + return -1; + } } + + /* Treat the path as a folder with each part inside. */ + snprintf(target_path, sizeof(target_path) - 1, "%s/%02d", devpart->emu_root_path, target_part); + + /* Update the target file path. */ + strcpy(devpart->emu_file_path, target_path); } - - /* Treat the path as a folder with each part inside. */ - snprintf(target_path, sizeof(target_path) - 1, "%s/%02d", origin_path, target_part); - } else { - /* If there are no parts, copy the origin path directly. */ - strcpy(target_path, origin_path); } - - /* Update the target file path. */ - devpart->emu_file_path = target_path; } /* Write the partition data. 
*/ diff --git a/fusee/fusee-secondary/src/device_partition.h b/fusee/fusee-secondary/src/device_partition.h index 41f034803..a4a5df86e 100644 --- a/fusee/fusee-secondary/src/device_partition.h +++ b/fusee/fusee-secondary/src/device_partition.h @@ -67,13 +67,16 @@ typedef struct device_partition_t { uint8_t __attribute__((aligned(16))) iv[DEVPART_IV_MAX_SIZE]; /* IV. */ bool initialized; - char *emu_file_path; /* Emulated device file path. */ + /* Emulation only. */ + bool is_emulated; bool emu_use_file; + char emu_root_path[0x100 + 1]; + char emu_file_path[0x300 + 1]; + int emu_num_parts; + uint64_t emu_part_limit; } device_partition_t; int device_partition_read_data(device_partition_t *devpart, void *dst, uint64_t sector, uint64_t num_sectors); int device_partition_write_data(device_partition_t *devpart, const void *src, uint64_t sector, uint64_t num_sectors); -int emu_device_partition_read_data(device_partition_t *devpart, void *dst, uint64_t sector, uint64_t num_sectors, const char *origin_path, int num_parts, uint64_t part_limit); -int emu_device_partition_write_data(device_partition_t *devpart, const void *src, uint64_t sector, uint64_t num_sectors, const char *origin_path, int num_parts, uint64_t part_limit); #endif diff --git a/fusee/fusee-secondary/src/emu_dev.c b/fusee/fusee-secondary/src/emu_dev.c deleted file mode 100644 index 55949ef7c..000000000 --- a/fusee/fusee-secondary/src/emu_dev.c +++ /dev/null @@ -1,489 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "emu_dev.h" -#include "utils.h" - -static int emudev_open(struct _reent *r, void *fileStruct, const char *path, int flags, int mode); -static int emudev_close(struct _reent *r, void *fd); -static ssize_t emudev_write(struct _reent *r, void *fd, const char *ptr, size_t len); -static ssize_t emudev_read(struct _reent *r, void *fd, char *ptr, size_t len); -static off_t emudev_seek(struct _reent *r, void *fd, off_t pos, int whence); -static int emudev_fstat(struct _reent *r, void *fd, struct stat *st); -static int emudev_stat(struct _reent *r, const char *file, struct stat *st); -static int emudev_fsync(struct _reent *r, void *fd); - -typedef struct emudev_device_t { - devoptab_t devoptab; - - char origin_path[0x300+1]; - int num_parts; - uint64_t part_limit; - uint8_t *tmp_sector; - device_partition_t devpart; - char name[32+1]; - char root_path[34+1]; - bool setup, registered; -} emudev_device_t; - -typedef struct emudev_file_t { - emudev_device_t *device; - int open_flags; - uint64_t offset; -} emudev_file_t; - -static emudev_device_t g_emudev_devices[EMUDEV_MAX_DEVICES] = {0}; - -static devoptab_t g_emudev_devoptab = { - .structSize = sizeof(emudev_file_t), - .open_r = emudev_open, - .close_r = emudev_close, - .write_r = emudev_write, - .read_r = emudev_read, - .seek_r = emudev_seek, - .fstat_r = emudev_fstat, - .stat_r = emudev_stat, - .fsync_r = emudev_fsync, - .deviceData = NULL, -}; - -static emudev_device_t *emudev_find_device(const char *name) { - for (size_t i = 0; i < EMUDEV_MAX_DEVICES; i++) { - if (g_emudev_devices[i].setup && strcmp(g_emudev_devices[i].name, name) == 0) { - return &g_emudev_devices[i]; - } - } - - return NULL; -} - -int emudev_mount_device(const char *name, const device_partition_t *devpart, const char *origin_path, int num_parts, uint64_t part_limit) { - emudev_device_t *device = NULL; - - if (name[0] == '\0' || devpart == NULL) { - errno = EINVAL; - return -1; - } - - if (strlen(name) > 32) { - errno = ENAMETOOLONG; - return -1; - } - if (emudev_find_device(name) != NULL) { - errno = EEXIST; /* Device already exists */ - return -1; - } - - /* Find an unused slot. */ - for (size_t i = 0; i < EMUDEV_MAX_DEVICES; i++) { - if (!g_emudev_devices[i].setup) { - device = &g_emudev_devices[i]; - break; - } - } - if (device == NULL) { - errno = ENOMEM; - return -1; - } - - memset(device, 0, sizeof(emudev_device_t)); - device->devoptab = g_emudev_devoptab; - device->devpart = *devpart; - strcpy(device->name, name); - strcpy(device->root_path, name); - strcat(device->root_path, ":/"); - - /* Copy the file path for file mode. */ - if (devpart->emu_use_file) - strcpy(device->origin_path, origin_path); - - device->num_parts = num_parts; - device->part_limit = part_limit; - - device->devoptab.name = device->name; - device->devoptab.deviceData = device; - - /* Initialize immediately. */ - int rc = device->devpart.initializer(&device->devpart); - if (rc != 0) { - errno = rc; - return -1; - } - - /* Allocate memory for our intermediate sector. 
*/ - device->tmp_sector = (uint8_t *)malloc(devpart->sector_size); - if (device->tmp_sector == NULL) { - errno = ENOMEM; - return -1; - } - - device->setup = true; - device->registered = false; - - return 0; -} - -int emudev_register_device(const char *name) { - emudev_device_t *device = emudev_find_device(name); - if (device == NULL) { - errno = ENOENT; - return -1; - } - - if (device->registered) { - /* Do nothing if the device is already registered. */ - return 0; - } - - if (AddDevice(&device->devoptab) == -1) { - errno = ENOMEM; - return -1; - } else { - device->registered = true; - return 0; - } -} - -int emudev_unregister_device(const char *name) { - emudev_device_t *device = emudev_find_device(name); - char drname[40]; - - if (device == NULL) { - errno = ENOENT; - return -1; - } - - if (!device->registered) { - /* Do nothing if the device is not registered. */ - return 0; - } - - strcpy(drname, name); - strcat(drname, ":"); - - if (RemoveDevice(drname) == -1) { - errno = ENOENT; - return -1; - } else { - device->registered = false; - return 0; - } -} - -int emudev_unmount_device(const char *name) { - int rc; - emudev_device_t *device = emudev_find_device(name); - - if (device == NULL) { - errno = ENOENT; - return -1; - } - - rc = emudev_unregister_device(name); - if (rc == -1) { - return -1; - } - - free(device->tmp_sector); - device->devpart.finalizer(&device->devpart); - memset(device, 0, sizeof(emudev_device_t)); - - return 0; -} - -int emudev_unmount_all(void) { - for (size_t i = 0; i < EMUDEV_MAX_DEVICES; i++) { - int rc = emudev_unmount_device(g_emudev_devices[i].name); - if (rc != 0) { - return rc; - } - } - - return 0; -} - -static int emudev_open(struct _reent *r, void *fileStruct, const char *path, int flags, int mode) { - (void)mode; - emudev_file_t *f = (emudev_file_t *)fileStruct; - emudev_device_t *device = (emudev_device_t *)(r->deviceData); - - /* Only allow "device:/". */ - if (strcmp(path, device->root_path) != 0) { - r->_errno = ENOENT; - return -1; - } - - /* Forbid some flags that we explicitly don't support.*/ - if (flags & (O_APPEND | O_TRUNC | O_EXCL)) { - r->_errno = EINVAL; - return -1; - } - - memset(f, 0, sizeof(emudev_file_t)); - f->device = device; - f->open_flags = flags; - return 0; -} - -static int emudev_close(struct _reent *r, void *fd) { - (void)r; - emudev_file_t *f = (emudev_file_t *)fd; - memset(f, 0, sizeof(emudev_file_t)); - - return 0; -} - -static ssize_t emudev_write(struct _reent *r, void *fd, const char *ptr, size_t len) { - emudev_file_t *f = (emudev_file_t *)fd; - emudev_device_t *device = f->device; - size_t sector_size = device->devpart.sector_size; - uint64_t sector_begin = f->offset / sector_size; - uint64_t sector_end = (f->offset + len + sector_size - 1) / sector_size; - uint64_t sector_end_aligned; - uint64_t current_sector = sector_begin; - const uint8_t *data = (const uint8_t *)ptr; - - int no = 0; - - if (sector_end >= device->devpart.num_sectors) { - len = (size_t)(sector_size * device->devpart.num_sectors - f->offset); - sector_end = device->devpart.num_sectors; - } - - sector_end_aligned = sector_end - ((f->offset + len) % sector_size != 0 ? 1 : 0); - - if (len == 0) { - return 0; - } - - /* Unaligned at the start, we need to read the sector and incorporate the data. */ - if (f->offset % sector_size != 0) { - size_t nb = (size_t)(len <= (sector_size - (f->offset % sector_size)) ? 
len : sector_size - (f->offset % sector_size)); - no = emu_device_partition_read_data(&device->devpart, device->tmp_sector, sector_begin, 1, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - - memcpy(device->tmp_sector + (f->offset % sector_size), data, nb); - - no = emu_device_partition_write_data(&device->devpart, device->tmp_sector, sector_begin, 1, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - - /* Advance */ - data += sector_size - (f->offset % sector_size); - current_sector++; - } - - /* Check if we're already done (otherwise this causes a bug in handling the last sector of the range). */ - if (current_sector == sector_end) { - f->offset += len; - return len; - } - - /* Write all of the sector-aligned data. */ - if (current_sector != sector_end_aligned) { - no = emu_device_partition_write_data(&device->devpart, data, current_sector, sector_end_aligned - current_sector, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - } - - data += sector_size * (sector_end_aligned - current_sector); - current_sector = sector_end_aligned; - - /* Unaligned at the end, we need to read the sector and incorporate the data. */ - if (sector_end != sector_end_aligned) { - no = emu_device_partition_read_data(&device->devpart, device->tmp_sector, sector_end_aligned, 1, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - - memcpy(device->tmp_sector, data, (size_t)((f->offset + len) % sector_size)); - - no = emu_device_partition_write_data(&device->devpart, device->tmp_sector, sector_end_aligned, 1, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - - /* Advance */ - data += sector_size - ((f->offset + len) % sector_size); - current_sector++; - } - - f->offset += len; - return len; -} - -static ssize_t emudev_read(struct _reent *r, void *fd, char *ptr, size_t len) { - emudev_file_t *f = (emudev_file_t *)fd; - emudev_device_t *device = f->device; - size_t sector_size = device->devpart.sector_size; - uint64_t sector_begin = f->offset / sector_size; - uint64_t sector_end = (f->offset + len + sector_size - 1) / sector_size; - uint64_t sector_end_aligned; - uint64_t current_sector = sector_begin; - uint8_t *data = (uint8_t *)ptr; - - int no = 0; - - if (sector_end >= device->devpart.num_sectors) { - len = (size_t)(sector_size * device->devpart.num_sectors - f->offset); - sector_end = device->devpart.num_sectors; - } - - sector_end_aligned = sector_end - ((f->offset + len) % sector_size != 0 ? 1 : 0); - - if (len == 0) { - return 0; - } - - /* Unaligned at the start, we need to read the sector and incorporate the data. */ - if (f->offset % sector_size != 0) { - size_t nb = (size_t)(len <= (sector_size - (f->offset % sector_size)) ? len : sector_size - (f->offset % sector_size)); - no = emu_device_partition_read_data(&device->devpart, device->tmp_sector, sector_begin, 1, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - - memcpy(data, device->tmp_sector + (f->offset % sector_size), nb); - - /* Advance */ - data += sector_size - (f->offset % sector_size); - current_sector++; - } - - /* Check if we're already done (otherwise this causes a bug in handling the last sector of the range). 
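*/

/* Illustrative sketch, not part of the patch: the emudev_read/emudev_write code in
 * this (now deleted) file splits a byte range into an unaligned head sector, a run
 * of whole sectors, and an unaligned tail sector, handling the partial pieces via a
 * temporary sector buffer. The helper below restates just the range split; all names
 * are invented and sector_size is assumed nonzero. */
#include <stdint.h>

typedef struct {
    uint64_t head_sector;  /* sector containing the unaligned head (valid if head_bytes != 0) */
    uint64_t head_bytes;
    uint64_t body_sector;  /* first fully-covered sector */
    uint64_t body_count;   /* number of fully-covered sectors */
    uint64_t tail_sector;  /* sector containing the unaligned tail (valid if tail_bytes != 0) */
    uint64_t tail_bytes;
} sector_split_t;

static sector_split_t split_sector_range(uint64_t offset, uint64_t len, uint64_t sector_size) {
    sector_split_t s = {0};
    const uint64_t end = offset + len;

    /* Partial head, clamped to the requested length. */
    s.head_bytes  = (offset % sector_size) ? (sector_size - offset % sector_size) : 0;
    if (s.head_bytes > len) {
        s.head_bytes = len;
    }
    s.head_sector = offset / sector_size;

    /* Whole sectors in the middle, then a partial tail. */
    const uint64_t body_start = offset + s.head_bytes;
    s.tail_bytes = (end > body_start) ? (end % sector_size) : 0;
    const uint64_t body_end = end - s.tail_bytes;

    s.body_sector = body_start / sector_size;
    s.body_count  = (body_end > body_start) ? (body_end - body_start) / sector_size : 0;
    s.tail_sector = body_end / sector_size;
    return s;
}

/* The deleted emu_dev.c listing continues below.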
*/ - if (current_sector == sector_end) { - f->offset += len; - return len; - } - - /* Read all of the sector-aligned data. */ - if (current_sector != sector_end_aligned) { - no = emu_device_partition_read_data(&device->devpart, data, current_sector, sector_end_aligned - current_sector, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - } - - data += sector_size * (sector_end_aligned - current_sector); - current_sector = sector_end_aligned; - - /* Unaligned at the end, we need to read the sector and incorporate the data. */ - if (sector_end != sector_end_aligned) { - no = emu_device_partition_read_data(&device->devpart, device->tmp_sector, sector_end_aligned, 1, device->origin_path, device->num_parts, device->part_limit); - if (no != 0) { - r->_errno = no; - return -1; - } - - memcpy(data, device->tmp_sector, (size_t)((f->offset + len) % sector_size)); - - /* Advance */ - data += sector_size - ((f->offset + len) % sector_size); - current_sector++; - } - - f->offset += len; - return len; -} - -static off_t emudev_seek(struct _reent *r, void *fd, off_t pos, int whence) { - emudev_file_t *f = (emudev_file_t *)fd; - emudev_device_t *device = f->device; - uint64_t off; - - switch (whence) { - case SEEK_SET: - off = 0; - break; - case SEEK_CUR: - off = f->offset; - break; - case SEEK_END: - off = device->devpart.num_sectors * device->devpart.sector_size; - break; - default: - r->_errno = EINVAL; - return -1; - } - - if (pos < 0 && pos + off < 0) { - /* don't allow seek to before the beginning of the file */ - r->_errno = EINVAL; - return -1; - } - - f->offset = (uint64_t)(pos + off); - return (off_t)(pos + off); -} - -static void emudev_stat_impl(emudev_device_t *device, struct stat *st) { - memset(st, 0, sizeof(struct stat)); - st->st_size = (off_t)(device->devpart.num_sectors * device->devpart.sector_size); - st->st_nlink = 1; - - st->st_blksize = device->devpart.sector_size; - st->st_blocks = st->st_size / st->st_blksize; - - st->st_mode = S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; -} - -static int emudev_fstat(struct _reent *r, void *fd, struct stat *st) { - (void)r; - emudev_file_t *f = (emudev_file_t *)fd; - emudev_device_t *device = f->device; - emudev_stat_impl(device, st); - - return 0; -} - -static int emudev_stat(struct _reent *r, const char *file, struct stat *st) { - emudev_device_t *device = (emudev_device_t *)(r->deviceData); - if (strcmp(file, device->root_path) != 0) { - r->_errno = ENOENT; - return -1; - } - - emudev_stat_impl(device, st); - return 0; -} - -static int emudev_fsync(struct _reent *r, void *fd) { - /* Nothing to do. */ - (void)r; - (void)fd; - return 0; -} diff --git a/fusee/fusee-secondary/src/exception_handlers.c b/fusee/fusee-secondary/src/exception_handlers.c index 698129ee4..5ade0ba66 100644 --- a/fusee/fusee-secondary/src/exception_handlers.c +++ b/fusee/fusee-secondary/src/exception_handlers.c @@ -14,6 +14,7 @@ * along with this program. If not, see . 
*/ +#include #include #include "exception_handlers.h" @@ -22,7 +23,7 @@ #include "lib/log.h" #define CODE_DUMP_SIZE 0x30 -#define STACK_DUMP_SIZE 0x60 +#define STACK_DUMP_SIZE 0x30 extern const uint32_t exception_handler_table[]; @@ -35,6 +36,40 @@ static const char *register_names[] = { "SP", "LR", "PC", "CPSR", }; +/* Adapted from https://gist.github.com/ccbrown/9722406 */ +static void hexdump(const void* data, size_t size, uintptr_t addrbase, char* strbuf) { + const uint8_t *d = (const uint8_t *)data; + char ascii[17] = {0}; + ascii[16] = '\0'; + + for (size_t i = 0; i < size; i++) { + if (i % 16 == 0) { + strbuf += sprintf(strbuf, "%0*" PRIXPTR ": | ", 2 * sizeof(addrbase), addrbase + i); + } + strbuf += sprintf(strbuf, "%02X ", d[i]); + if (d[i] >= ' ' && d[i] <= '~') { + ascii[i % 16] = d[i]; + } else { + ascii[i % 16] = '.'; + } + if ((i+1) % 8 == 0 || i+1 == size) { + strbuf += sprintf(strbuf, " "); + if ((i+1) % 16 == 0) { + strbuf += sprintf(strbuf, "| %s \n", ascii); + } else if (i+1 == size) { + ascii[(i+1) % 16] = '\0'; + if ((i+1) % 16 <= 8) { + strbuf += sprintf(strbuf, " "); + } + for (size_t j = (i+1) % 16; j < 16; j++) { + strbuf += sprintf(strbuf, " "); + } + strbuf += sprintf(strbuf, "| %s \n", ascii); + } + } + } +} + void setup_exception_handlers(void) { volatile uint32_t *bpmp_exception_handler_table = (volatile uint32_t *)0x6000F200; for (int i = 0; i < 8; i++) { @@ -45,38 +80,40 @@ void setup_exception_handlers(void) { } void exception_handler_main(uint32_t *registers, unsigned int exception_type) { - uint8_t code_dump[CODE_DUMP_SIZE]; - uint8_t stack_dump[STACK_DUMP_SIZE]; - size_t code_dump_size; - size_t stack_dump_size; + char exception_log[0x400] = {0}; + uint8_t code_dump[CODE_DUMP_SIZE] = {0}; + uint8_t stack_dump[STACK_DUMP_SIZE] = {0}; + size_t code_dump_size = 0; + size_t stack_dump_size = 0; uint32_t pc = registers[15]; uint32_t cpsr = registers[16]; - uint32_t instr_addr = pc + ((cpsr & 0x20) ? 2 : 4) - CODE_DUMP_SIZE; - print(SCREEN_LOG_LEVEL_ERROR, "\nSomething went wrong...\n"); - + sprintf(exception_log, "An exception occured!\n"); + code_dump_size = safecpy(code_dump, (const void *)instr_addr, CODE_DUMP_SIZE); stack_dump_size = safecpy(stack_dump, (const void *)registers[13], STACK_DUMP_SIZE); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nException type: %s\n", - exception_names[exception_type]); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nRegisters:\n\n"); + sprintf(exception_log + strlen(exception_log), "\nException type: %s\n", exception_names[exception_type]); + sprintf(exception_log + strlen(exception_log), "\nRegisters:\n"); /* Print r0 to pc. */ for (int i = 0; i < 16; i += 2) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%-7s%08"PRIX32" %-7s%08"PRIX32"\n", + sprintf(exception_log + strlen(exception_log), "%-7s%08"PRIX32" %-7s%08"PRIX32"\n", register_names[i], registers[i], register_names[i+1], registers[i+1]); } /* Print cpsr. 
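*/

/* Illustrative sketch, not part of the patch: the rewritten handler above builds the
 * whole report in exception_log via repeated sprintf(exception_log + strlen(...), ...)
 * and hands it to fatal_error once. A bounds-checked variant of that append pattern
 * could look like this hypothetical helper. */
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

static void log_appendf(char *buf, size_t buf_size, const char *fmt, ...) {
    const size_t used = strlen(buf);
    if (buf_size == 0 || used >= buf_size - 1) {
        return; /* Buffer already full; drop the append rather than overflow. */
    }

    va_list args;
    va_start(args, fmt);
    vsnprintf(buf + used, buf_size - used, fmt, args);
    va_end(args);
}

/* For example:
 *   char exception_log[0x400] = {0};
 *   log_appendf(exception_log, sizeof(exception_log), "\nException type: %s\n", exception_names[exception_type]);
 */

/* The exception_handlers.c hunk continues below.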
*/ - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%-7s%08"PRIX32"\n", register_names[16], registers[16]); - - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nCode dump:\n"); - hexdump(code_dump, code_dump_size, instr_addr); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nStack dump:\n"); - hexdump(stack_dump, stack_dump_size, registers[13]); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\n"); - fatal_error("An exception occurred!\n"); + sprintf(exception_log + strlen(exception_log), "%-7s%08"PRIX32"\n", register_names[16], registers[16]); + + /* Print code and stack regions. */ + sprintf(exception_log + strlen(exception_log), "\nCode dump:\n"); + hexdump(code_dump, code_dump_size, instr_addr, exception_log + strlen(exception_log)); + sprintf(exception_log + strlen(exception_log), "\nStack dump:\n"); + hexdump(stack_dump, stack_dump_size, registers[13], exception_log + strlen(exception_log)); + sprintf(exception_log + strlen(exception_log), "\n"); + + /* Throw fatal error with the full exception log. */ + fatal_error(exception_log); } diff --git a/fusee/fusee-secondary/src/fs_dev.c b/fusee/fusee-secondary/src/fs_dev.c index 28b640438..93f8b4161 100644 --- a/fusee/fusee-secondary/src/fs_dev.c +++ b/fusee/fusee-secondary/src/fs_dev.c @@ -302,7 +302,6 @@ int fsdev_register_keys(const char *name, unsigned int target_firmware, BisParti return 0; } - int fsdev_unmount_all(void) { for (size_t i = 0; i < FF_VOLUMES; i++) { int ret = fsdev_unmount_device(g_fsdev_devices[i].name); diff --git a/fusee/fusee-secondary/src/gpt.c b/fusee/fusee-secondary/src/gpt.c index eecc5ea59..5f4183b12 100644 --- a/fusee/fusee-secondary/src/gpt.c +++ b/fusee/fusee-secondary/src/gpt.c @@ -112,42 +112,3 @@ int gpt_iterate_through_entries(FILE *disk, size_t sector_size, gpt_entry_iterat return 0; } - -int gpt_iterate_through_emu_entries(FILE *disk, size_t sector_size, gpt_emu_entry_iterator_t callback, void *param, const char *origin_path, int num_parts, uint64_t part_limit) { - efi_header_t hdr; - efi_entry_t entry; - size_t offset = 2 * 512; /* Sector #2. */ - size_t delta; - - /* Get the header. */ - if (gpt_get_header(&hdr, disk, sector_size) == -1) { - return -1; - } - - /* Seek to the entry table. */ - if (fseek(disk, sector_size * hdr.entries_first_lba - offset, SEEK_CUR) != 0) { - return -1; - } - - offset = sector_size * hdr.entries_first_lba; - delta = hdr.entry_size - sizeof(efi_entry_t); - - /* Iterate through the entries. 
*/ - for (uint32_t i = 0; i < hdr.entry_count; i++) { - if (!fread(&entry, sizeof(efi_entry_t), 1, disk)) { - return -1; - } - - if (callback(&entry, param, offset, disk, origin_path, num_parts, part_limit) != 0) { - return -1; - } - - if (delta != 0 && fseek(disk, delta, SEEK_CUR) != 0) { - return -1; - } - - offset += hdr.entry_size; - } - - return 0; -} diff --git a/fusee/fusee-secondary/src/gpt.h b/fusee/fusee-secondary/src/gpt.h index 349aaa197..491dae4f6 100644 --- a/fusee/fusee-secondary/src/gpt.h +++ b/fusee/fusee-secondary/src/gpt.h @@ -56,6 +56,5 @@ typedef int (*gpt_emu_entry_iterator_t)(const efi_entry_t *entry, void *param, s int gpt_get_header(efi_header_t *out, FILE *disk, size_t sector_size); int gpt_iterate_through_entries(FILE *disk, size_t sector_size, gpt_entry_iterator_t callback, void *param); -int gpt_iterate_through_emu_entries(FILE *disk, size_t sector_size, gpt_emu_entry_iterator_t callback, void *param, const char *origin_path, int num_parts, uint64_t part_limit); #endif diff --git a/fusee/fusee-secondary/src/kernel_patches.c b/fusee/fusee-secondary/src/kernel_patches.c index 6408c14c1..fc2451052 100644 --- a/fusee/fusee-secondary/src/kernel_patches.c +++ b/fusee/fusee-secondary/src/kernel_patches.c @@ -963,6 +963,11 @@ void package2_patch_kernel(void *_kernel, size_t *kernel_size, bool is_sd_kernel } if (kernel_info == NULL && is_sd_kernel) { + /* If the kernel is mesosphere, patch it. */ + if (*(volatile uint32_t *)((uintptr_t)_kernel + 4) == 0x3053534D) { + *out_ini1 = (void *)((uintptr_t)_kernel + *(volatile uint32_t *)((uintptr_t)_kernel + 8)); + *(volatile uint64_t *)((uintptr_t)_kernel + 8) = (uint64_t)*kernel_size; + } return; } @@ -971,12 +976,6 @@ void package2_patch_kernel(void *_kernel, size_t *kernel_size, bool is_sd_kernel const uint32_t kernel_ldr_offset = *((volatile uint64_t *)((uintptr_t)_kernel + kernel_info->embedded_ini_ptr + 8)); memcpy((void *)((uintptr_t)_kernel + kernel_ldr_offset), kernel_ldr_bin, kernel_ldr_bin_size); - /* Set target firmware for our kernel loader. */ - uint32_t *kldr_u32 = (uint32_t *)((uintptr_t)_kernel + kernel_ldr_offset); - if (kldr_u32[1] == 0x30444C4D) { - kldr_u32[2] = target_firmware; - } - /* Update size. 
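*/

/* Illustrative sketch, not part of the patch: the new block above recognizes a
 * mesosphere kernel by the magic word at offset 4 (0x3053534D, i.e. the bytes
 * 'M' 'S' 'S' '0' read as a little-endian u32) and takes the embedded INI1 offset
 * from the word at offset 8. The struct and function names below are invented;
 * the layout is only what the hunk itself relies on. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef struct {
    uint32_t field_0;      /* not examined by the check */
    uint32_t magic;        /* 0x3053534D ("MSS0") for mesosphere */
    uint32_t ini1_offset;  /* location of the embedded INI1, relative to the kernel base */
} mesosphere_header_t;

static bool is_mesosphere_kernel(const void *kernel) {
    mesosphere_header_t hdr;
    memcpy(&hdr, kernel, sizeof(hdr));
    return hdr.magic == 0x3053534D;
}

/* The kernel_patches.c hunk continues below.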
*/ *kernel_size = kernel_ldr_offset + kernel_ldr_bin_size; diff --git a/fusee/fusee-secondary/src/key_derivation.c b/fusee/fusee-secondary/src/key_derivation.c index 190e37539..73a052bdf 100644 --- a/fusee/fusee-secondary/src/key_derivation.c +++ b/fusee/fusee-secondary/src/key_derivation.c @@ -273,7 +273,10 @@ void derive_bis_key(void *dst, BisPartition partition_id, uint32_t target_firmwa } }; - const uint32_t bis_key_generation = fuse_get_5x_key_generation(); + uint32_t bis_key_generation = fuse_get_5x_key_generation(); + if (bis_key_generation > 0) { + bis_key_generation -= 1; + } static const uint8_t AL16 bis_kek_source[0x10] = {0x34, 0xC1, 0xA0, 0xC4, 0x82, 0x58, 0xF8, 0xB4, 0xFA, 0x9E, 0x5E, 0x6A, 0xDA, 0xFC, 0x7E, 0x4F}; switch (partition_id) { diff --git a/fusee/fusee-secondary/src/lib/log.h b/fusee/fusee-secondary/src/lib/log.h index 8a28b0835..d1c5777f3 100644 --- a/fusee/fusee-secondary/src/lib/log.h +++ b/fusee/fusee-secondary/src/lib/log.h @@ -17,7 +17,7 @@ #ifndef FUSEE_LOG_H #define FUSEE_LOG_H -#define PRINT_MESSAGE_MAX_LENGTH 512 +#define PRINT_MESSAGE_MAX_LENGTH 1024 #include diff --git a/fusee/fusee-secondary/src/nxboot.c b/fusee/fusee-secondary/src/nxboot.c index 0fa866ed7..7a8519208 100644 --- a/fusee/fusee-secondary/src/nxboot.c +++ b/fusee/fusee-secondary/src/nxboot.c @@ -64,7 +64,6 @@ #undef u32 extern const uint8_t warmboot_bin[]; -extern const uint32_t warmboot_bin_size; static const uint8_t retail_pkc_modulus[0x100] = { 0xF7, 0x86, 0x47, 0xAB, 0x71, 0x89, 0x81, 0xB5, 0xCF, 0x0C, 0xB0, 0xE8, 0x48, 0xA7, 0xFD, 0xAD, @@ -236,6 +235,8 @@ static uint32_t nxboot_get_specific_target_firmware(uint32_t target_firmware){ #define CHECK_NCA(NCA_ID, VERSION) do { if (is_nca_present(NCA_ID)) { return ATMOSPHERE_TARGET_FIRMWARE_##VERSION; } } while(0) if (target_firmware >= ATMOSPHERE_TARGET_FIRMWARE_10_0_0) { + CHECK_NCA("5077973537f6735b564dd7475b779f87", 10_1_1); /* Exclusive to China. */ + CHECK_NCA("fd1faed0ca750700d254c0915b93d506", 10_1_0); CHECK_NCA("34728c771299443420820d8ae490ea41", 10_0_4); CHECK_NCA("5b1df84f88c3334335bbb45d8522cbb4", 10_0_3); CHECK_NCA("e951bc9dedcd54f65ffd83d4d050f9e0", 10_0_2); @@ -649,7 +650,7 @@ uint32_t nxboot_main(void) { } else { emummc_size = get_file_size("atmosphere/emummc.kip"); if (emummc_size != 0) { - /* Allocate memory for the TSEC firmware. */ + /* Allocate memory for the emummc KIP. */ emummc = memalign(0x100, emummc_size); if (emummc == NULL) { @@ -838,13 +839,7 @@ uint32_t nxboot_main(void) { /* Derive new device keys. */ { - if (target_firmware >= ATMOSPHERE_TARGET_FIRMWARE_5_0_0) { - derive_new_device_keys(fuse_get_retail_type() != 0, KEYSLOT_SWITCH_5XNEWDEVICEKEYGENKEY, target_firmware); - } else if (target_firmware >= ATMOSPHERE_TARGET_FIRMWARE_4_0_0) { - derive_new_device_keys(fuse_get_retail_type() != 0, KEYSLOT_SWITCH_4XNEWDEVICEKEYGENKEY, target_firmware); - } else { - /* No new keys to derive */ - } + derive_new_device_keys(fuse_get_retail_type() != 0, KEYSLOT_SWITCH_5XNEWDEVICEKEYGENKEY, target_firmware); } /* Set the system partition's keys. 
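*/

/* Illustrative sketch, not part of the patch: a pure-function restatement of the
 * key_derivation.c adjustment above, which decrements a nonzero fuse-reported key
 * generation by one and leaves zero unchanged before BIS key derivation. The
 * function name is invented. */
#include <stdint.h>

static uint32_t adjusted_bis_key_generation(uint32_t fuse_generation) {
    return (fuse_generation > 0) ? (fuse_generation - 1) : 0;
}

/* The nxfs.c diff follows.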
*/ diff --git a/fusee/fusee-secondary/src/nxfs.c b/fusee/fusee-secondary/src/nxfs.c index 8758085a4..d63a1372e 100644 --- a/fusee/fusee-secondary/src/nxfs.c +++ b/fusee/fusee-secondary/src/nxfs.c @@ -36,7 +36,6 @@ static bool g_emmc_device_initialized = false; static bool g_fsdev_ready = false; static bool g_rawdev_ready = false; -static bool g_emudev_ready = false; static bool g_is_emummc = false; @@ -356,87 +355,6 @@ static int nxfs_mount_partition_gpt_callback(const efi_entry_t *entry, void *par return 0; } -static int nxfs_mount_emu_partition_gpt_callback(const efi_entry_t *entry, void *param, size_t entry_offset, FILE *disk, const char *origin_path, int num_parts, uint64_t part_limit) { - (void)entry_offset; - (void)disk; - device_partition_t *parent = (device_partition_t *)param; - device_partition_t devpart = *parent; - char name_buffer[128]; - const uint16_t *utf16name = entry->name; - uint32_t name_len; - int rc; - - static const struct { - const char *partition_name; - const char *mount_point; - bool is_fat; - bool is_encrypted; - bool register_immediately; - } known_partitions[] = { - {"PRODINFO", "prodinfo", false, true, false}, - {"PRODINFOF", "prodinfof", true, true, false}, - {"BCPKG2-1-Normal-Main", "bcpkg21", false, false, true}, - {"BCPKG2-2-Normal-Sub", "bcpkg22", false, false, false}, - {"BCPKG2-3-SafeMode-Main", "bcpkg23", false, false, false}, - {"BCPKG2-4-SafeMode-Sub", "bcpkg24", false, false, false}, - {"BCPKG2-5-Repair-Main", "bcpkg25", false, false, false}, - {"BCPKG2-6-Repair-Sub", "bcpkg26", false, false, false}, - {"SAFE", "safe", true, true, false}, - {"SYSTEM", "system", true, true, true}, - {"USER", "user", true, true, false}, - }; - - /* Convert the partition name to ASCII, for comparison. */ - for (name_len = 0; name_len < sizeof(entry->name) && *utf16name != 0; name_len++) { - name_buffer[name_len] = (char)*utf16name++; - } - name_buffer[name_len] = '\0'; - - /* Mount the partition, if we know about it. */ - for (size_t i = 0; i < sizeof(known_partitions)/sizeof(known_partitions[0]); i++) { - if (strcmp(name_buffer, known_partitions[i].partition_name) == 0) { - devpart.start_sector += entry->first_lba; - devpart.num_sectors = (entry->last_lba + 1) - entry->first_lba; - if (parent->num_sectors < devpart.num_sectors) { - errno = EINVAL; - return -1; - } - - if (known_partitions[i].is_encrypted) { - devpart.read_cipher = nxfs_bis_crypto_decrypt; - devpart.write_cipher = nxfs_bis_crypto_encrypt; - devpart.crypto_mode = DevicePartitionCryptoMode_Xts; - } - - if (known_partitions[i].is_fat) { - rc = fsdev_mount_device(known_partitions[i].mount_point, &devpart, false); - if (rc == -1) { - return -1; - } - if (known_partitions[i].register_immediately) { - rc = fsdev_register_device(known_partitions[i].mount_point); - if (rc == -1) { - return -1; - } - } - } else { - rc = emudev_mount_device(known_partitions[i].mount_point, &devpart, origin_path, num_parts, part_limit); - if (rc == -1) { - return -1; - } - if (known_partitions[i].register_immediately) { - rc = emudev_register_device(known_partitions[i].mount_point); - if (rc == -1) { - return -1; - } - } - } - } - } - - return 0; -} - int nxfs_mount_sd() { device_partition_t model; int rc; @@ -446,6 +364,7 @@ int nxfs_mount_sd() { model.device_struct = &g_sd_mmcpart; model.start_sector = 0; model.num_sectors = 1u << 30; /* arbitrary numbers of sectors. TODO: find the size of the SD in sectors. */ + model.is_emulated = false; /* Mount the SD card device. 
*/ rc = fsdev_mount_device("sdmc", &model, true); @@ -479,6 +398,7 @@ int nxfs_mount_emmc() { model.device_struct = &g_emmc_boot0_mmcpart; model.start_sector = 0; model.num_sectors = 0x184000 / model.sector_size; + model.is_emulated = false; /* Mount boot0 device. */ rc = rawdev_mount_device("boot0", &model, true); @@ -499,6 +419,7 @@ int nxfs_mount_emmc() { model.device_struct = &g_emmc_boot1_mmcpart; model.start_sector = 0; model.num_sectors = 0x80000 / model.sector_size; + model.is_emulated = false; /* Mount boot1 device. */ rc = rawdev_mount_device("boot1", &model, false); @@ -514,6 +435,7 @@ int nxfs_mount_emmc() { model.device_struct = &g_emmc_user_mmcpart; model.start_sector = 0; model.num_sectors = (256ull << 30) / model.sector_size; + model.is_emulated = false; /* Mount raw NAND device. */ rc = rawdev_mount_device("rawnand", &model, false); @@ -558,10 +480,11 @@ int nxfs_mount_emummc_partition(uint64_t emummc_start_sector) { model = g_emummc_devpart_template; model.start_sector = emummc_start_sector + (0x400000 * 0 / model.sector_size); model.num_sectors = 0x400000 / model.sector_size; + model.is_emulated = true; model.emu_use_file = false; /* Mount emulated boot0 device. */ - rc = emudev_mount_device("boot0", &model, NULL, 0, 0); + rc = rawdev_mount_device("boot0", &model, true); /* Failed to mount boot0 device. */ if (rc == -1) { @@ -569,7 +492,7 @@ int nxfs_mount_emummc_partition(uint64_t emummc_start_sector) { } /* Register emulated boot0 device. */ - rc = emudev_register_device("boot0"); + rc = rawdev_register_device("boot0"); /* Failed to register boot0 device. */ if (rc == -1) { @@ -580,10 +503,11 @@ int nxfs_mount_emummc_partition(uint64_t emummc_start_sector) { model = g_emummc_devpart_template; model.start_sector = emummc_start_sector + (0x400000 * 1 / model.sector_size); model.num_sectors = 0x400000 / model.sector_size; + model.is_emulated = true; model.emu_use_file = false; /* Mount emulated boot1 device. */ - rc = emudev_mount_device("boot1", &model, NULL, 0, 0); + rc = rawdev_mount_device("boot1", &model, false); /* Failed to mount boot1. */ if (rc == -1) { @@ -596,10 +520,11 @@ int nxfs_mount_emummc_partition(uint64_t emummc_start_sector) { model = g_emummc_devpart_template; model.start_sector = emummc_start_sector + (0x400000 * 2 / model.sector_size); model.num_sectors = (256ull << 30) / model.sector_size; + model.is_emulated = true; model.emu_use_file = false; /* Mount emulated raw NAND device. */ - rc = emudev_mount_device("rawnand", &model, NULL, 0, 0); + rc = rawdev_mount_device("rawnand", &model, false); /* Failed to mount raw NAND. */ if (rc == -1) { @@ -607,7 +532,7 @@ int nxfs_mount_emummc_partition(uint64_t emummc_start_sector) { } /* Register emulated raw NAND device. */ - rc = emudev_register_device("rawnand"); + rc = rawdev_register_device("rawnand"); /* Failed to register raw NAND device. */ if (rc == -1) { @@ -623,14 +548,14 @@ int nxfs_mount_emummc_partition(uint64_t emummc_start_sector) { } /* Iterate the GPT and mount each emulated raw NAND partition. */ - rc = gpt_iterate_through_emu_entries(rawnand, model.sector_size, nxfs_mount_emu_partition_gpt_callback, &model, NULL, 0, 0); + rc = gpt_iterate_through_entries(rawnand, model.sector_size, nxfs_mount_partition_gpt_callback, &model); /* Close emulated raw NAND device. */ fclose(rawnand); /* All emulated devices are ready. 
*/ if (rc == 0) { - g_emudev_ready = true; + g_rawdev_ready = true; g_is_emummc = true; } @@ -641,10 +566,9 @@ int nxfs_mount_emummc_file(const char *emummc_path, int num_parts, uint64_t part device_partition_t model; int rc; FILE *rawnand; - bool is_exfat; char emummc_boot0_path[0x300 + 1] = {0}; char emummc_boot1_path[0x300 + 1] = {0}; - + /* Check if the SD card is EXFAT formatted. */ rc = fsdev_is_exfat("sdmc"); @@ -652,29 +576,20 @@ int nxfs_mount_emummc_file(const char *emummc_path, int num_parts, uint64_t part if (rc == -1) { return -1; } - - /* Set EXFAT status. */ - is_exfat = (rc == 1); - - /* Reject single part in FAT32. */ - /* NOTE: This check has no effect in the current design. */ - if (!is_exfat && (num_parts < 1)) { - return -2; - } - + /* We want a folder with the archive bit set. */ rc = fsdev_get_attr(emummc_path); /* Failed to get file DOS attributes. */ if (rc == -1) { - return -3; + return -2; } - + /* Our path is not a directory. */ if (!(rc & AM_DIR)) { - return -4; + return -3; } - + /* Check if the archive bit is not set. */ if (!(rc & AM_ARC)) { /* Try to set the archive bit. */ @@ -682,102 +597,117 @@ int nxfs_mount_emummc_file(const char *emummc_path, int num_parts, uint64_t part /* Failed to set file DOS attributes. */ if (rc == -1) { - return -5; + return -4; } } - + + /* Prepare boot0 file path. */ + snprintf(emummc_boot0_path, sizeof(emummc_boot0_path) - 1, "%s/%s", emummc_path, "boot0"); + /* Setup an emulation template for boot0. */ model = g_emummc_devpart_template; model.start_sector = 0; model.num_sectors = 0x400000 / model.sector_size; + model.is_emulated = true; model.emu_use_file = true; - - /* Prepare boot0 file path. */ - snprintf(emummc_boot0_path, sizeof(emummc_boot0_path) - 1, "%s/%s", emummc_path, "boot0"); + model.emu_num_parts = 0; + model.emu_part_limit = 0; + strcpy(model.emu_root_path, emummc_path); + strcpy(model.emu_file_path, emummc_boot0_path); /* Mount emulated boot0 device. */ - rc = emudev_mount_device("boot0", &model, emummc_boot0_path, 0, 0); + rc = rawdev_mount_device("boot0", &model, true); /* Failed to mount boot0 device. */ if (rc == -1) { - return -6; + return -5; } - + /* Register emulated boot0 device. */ - rc = emudev_register_device("boot0"); + rc = rawdev_register_device("boot0"); /* Failed to register boot0 device. */ if (rc == -1) { - return -7; + return -6; } - + + /* Prepare boot1 file path. */ + snprintf(emummc_boot1_path, sizeof(emummc_boot1_path) - 1, "%s/%s", emummc_path, "boot1"); + /* Setup an emulation template for boot1. */ model = g_emummc_devpart_template; model.start_sector = 0; model.num_sectors = 0x400000 / model.sector_size; + model.is_emulated = true; model.emu_use_file = true; - - /* Prepare boot1 file path. */ - snprintf(emummc_boot1_path, sizeof(emummc_boot1_path) - 1, "%s/%s", emummc_path, "boot1"); - + model.emu_num_parts = 0; + model.emu_part_limit = 0; + strcpy(model.emu_root_path, emummc_path); + strcpy(model.emu_file_path, emummc_boot1_path); + /* Mount emulated boot1 device. */ - rc = emudev_mount_device("boot1", &model, emummc_boot1_path, 0, 0); + rc = rawdev_mount_device("boot1", &model, false); /* Failed to mount boot1. */ if (rc == -1) { - return -8; + return -7; } - + /* Register emulated boot1 device. */ - rc = emudev_register_device("boot1"); + rc = rawdev_register_device("boot1"); /* Failed to register boot1 device. */ if (rc == -1) { - return -9; + return -8; } - + /* Setup a template for raw NAND. 
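*/

/* Illustrative sketch, not part of the patch: with emu_dev.c removed, file-backed
 * emuMMC goes through rawdev_mount_device like everything else, and the difference
 * is carried entirely by the new device_partition_t fields (is_emulated, emu_use_file,
 * emu_root_path, emu_file_path, emu_num_parts, emu_part_limit). A hypothetical helper
 * consolidating the repeated boot0/boot1/raw NAND setup below might look like this;
 * only the struct fields come from the patch, the helper itself is invented. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "device_partition.h"

static void emummc_prepare_file_backed(device_partition_t *model, const device_partition_t *base,
                                       const char *emummc_path, const char *file_name,
                                       uint64_t num_sectors, int num_parts, uint64_t part_limit) {
    *model = *base;
    model->start_sector   = 0;
    model->num_sectors    = num_sectors;
    model->is_emulated    = true;
    model->emu_use_file   = true;
    model->emu_num_parts  = num_parts;
    model->emu_part_limit = part_limit;
    strcpy(model->emu_root_path, emummc_path);
    snprintf(model->emu_file_path, sizeof(model->emu_file_path) - 1, "%s/%s", emummc_path, file_name);
}

/* For example, the boot0 template below could be built with:
 *   emummc_prepare_file_backed(&model, &g_emummc_devpart_template, emummc_path, "boot0",
 *                              0x400000 / g_emummc_devpart_template.sector_size, 0, 0);
 */

/* The nxfs.c hunk continues below.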
*/ model = g_emummc_devpart_template; model.start_sector = 0; model.num_sectors = (256ull << 30) / model.sector_size; + model.is_emulated = true; model.emu_use_file = true; + model.emu_num_parts = num_parts; + model.emu_part_limit = part_limit; + strcpy(model.emu_root_path, emummc_path); + strcpy(model.emu_file_path, emummc_path); /* Mount emulated raw NAND device from single or multiple parts. */ - rc = emudev_mount_device("rawnand", &model, emummc_path, num_parts, part_limit); + rc = rawdev_mount_device("rawnand", &model, false); /* Failed to mount raw NAND. */ if (rc == -1) { - return -10; + return -9; } - + /* Register emulated raw NAND device. */ - rc = emudev_register_device("rawnand"); + rc = rawdev_register_device("rawnand"); /* Failed to register raw NAND device. */ if (rc == -1) { - return -11; + return -10; } - + /* Open emulated raw NAND device. */ rawnand = fopen("rawnand:/", "rb"); /* Failed to open emulated raw NAND device. */ if (rawnand == NULL) { - return -12; + return -11; } - + /* Iterate the GPT and mount each emulated raw NAND partition. */ - rc = gpt_iterate_through_emu_entries(rawnand, model.sector_size, nxfs_mount_emu_partition_gpt_callback, &model, emummc_path, num_parts, part_limit); + rc = gpt_iterate_through_entries(rawnand, model.sector_size, nxfs_mount_partition_gpt_callback, &model); /* Close emulated raw NAND device. */ fclose(rawnand); /* All emulated devices are ready. */ if (rc == 0) { - g_emudev_ready = true; + g_rawdev_ready = true; g_is_emummc = true; } - + return rc; } @@ -805,18 +735,6 @@ int nxfs_unmount_emmc() { return rc; } -int nxfs_unmount_emummc() { - int rc = 0; - - /* Unmount all emulated devices. */ - if (g_emudev_ready) { - rc = emudev_unmount_all(); - g_emudev_ready = false; - } - - return rc; -} - int nxfs_init() { int rc; @@ -832,5 +750,5 @@ int nxfs_init() { } int nxfs_end() { - return ((nxfs_unmount_sd() || nxfs_unmount_emmc() || nxfs_unmount_emummc()) ? -1 : 0); + return ((nxfs_unmount_sd() || nxfs_unmount_emmc()) ? 
-1 : 0); } diff --git a/fusee/fusee-secondary/src/nxfs.h b/fusee/fusee-secondary/src/nxfs.h index e1a8c225a..685147f72 100644 --- a/fusee/fusee-secondary/src/nxfs.h +++ b/fusee/fusee-secondary/src/nxfs.h @@ -19,7 +19,6 @@ #include "fs_dev.h" #include "raw_dev.h" -#include "emu_dev.h" int nxfs_init(); int nxfs_end(); diff --git a/fusee/fusee-secondary/src/raw_dev.c b/fusee/fusee-secondary/src/raw_dev.c index 270e96eb1..e20c272dd 100644 --- a/fusee/fusee-secondary/src/raw_dev.c +++ b/fusee/fusee-secondary/src/raw_dev.c @@ -77,9 +77,6 @@ static rawdev_device_t *rawdev_find_device(const char *name) { int rawdev_mount_device(const char *name, const device_partition_t *devpart, bool initialize_immediately) { rawdev_device_t *device = NULL; - char drname[40]; - strcpy(drname, name); - strcat(drname, ":"); if (name[0] == '\0' || devpart == NULL) { errno = EINVAL; @@ -205,6 +202,19 @@ int rawdev_unmount_device(const char *name) { return 0; } +int rawdev_register_keys(const char *name, unsigned int target_firmware, BisPartition part) { + rawdev_device_t *device = rawdev_find_device(name); + + if (device == NULL) { + errno = ENOENT; + return -1; + } + + derive_bis_key(device->devpart.keys, part, target_firmware); + + return 0; +} + int rawdev_unmount_all(void) { for (size_t i = 0; i < RAWDEV_MAX_DEVICES; i++) { int rc = rawdev_unmount_device(g_rawdev_devices[i].name); diff --git a/fusee/fusee-secondary/src/raw_dev.h b/fusee/fusee-secondary/src/raw_dev.h index df2fac1f0..9a7454082 100644 --- a/fusee/fusee-secondary/src/raw_dev.h +++ b/fusee/fusee-secondary/src/raw_dev.h @@ -21,6 +21,7 @@ #include #include #include "device_partition.h" +#include "key_derivation.h" #define RAWDEV_MAX_DEVICES 16 @@ -29,6 +30,8 @@ int rawdev_register_device(const char *name); int rawdev_unregister_device(const char *name); int rawdev_unmount_device(const char *name); /* also unregisters. */ +int rawdev_register_keys(const char *name, unsigned int target_firmware, BisPartition part); + int rawdev_unmount_all(void); #endif diff --git a/fusee/fusee-secondary/src/stratosphere.c b/fusee/fusee-secondary/src/stratosphere.c index 4adec08b6..903b24206 100644 --- a/fusee/fusee-secondary/src/stratosphere.c +++ b/fusee/fusee-secondary/src/stratosphere.c @@ -51,7 +51,6 @@ static bool g_stratosphere_boot_enabled = true; static bool g_stratosphere_ncm_enabled = false; extern const uint8_t loader_kip[], pm_kip[], sm_kip[], spl_kip[], boot_kip[], ncm_kip[], ams_mitm_kip[]; -extern const uint32_t loader_kip_size, pm_kip_size, sm_kip_size, spl_kip_size, boot_kip_size, ncm_kip_size, ams_mitm_kip_size; static emummc_fs_ver_t g_fs_ver = FS_VER_1_0_0; diff --git a/fusee/fusee-secondary/src/utils.c b/fusee/fusee-secondary/src/utils.c index 2ea2aafd4..0e8f0abd5 100644 --- a/fusee/fusee-secondary/src/utils.c +++ b/fusee/fusee-secondary/src/utils.c @@ -157,12 +157,18 @@ __attribute__ ((noreturn)) void generic_panic(void) { } __attribute__((noreturn)) void fatal_error(const char *fmt, ...) { + /* Override the global logging level. */ + log_set_log_level(SCREEN_LOG_LEVEL_ERROR); + + /* Display fatal error. */ va_list args; print(SCREEN_LOG_LEVEL_ERROR, "Fatal error: "); va_start(args, fmt); vprint(SCREEN_LOG_LEVEL_ERROR, fmt, args); va_end(args); print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\n Press POWER to reboot.\n"); + + /* Wait for button and reboot. 
*/ wait_for_button_and_reboot(); } @@ -174,37 +180,3 @@ __attribute__((noinline)) bool overlaps(uint64_t as, uint64_t ae, uint64_t bs, u return true; return false; } - -/* Adapted from https://gist.github.com/ccbrown/9722406 */ -void hexdump(const void* data, size_t size, uintptr_t addrbase) { - const uint8_t *d = (const uint8_t *)data; - char ascii[17]; - ascii[16] = '\0'; - - for (size_t i = 0; i < size; i++) { - if (i % 16 == 0) { - printf("%0*" PRIXPTR ": | ", 2 * sizeof(addrbase), addrbase + i); - } - printf("%02X ", d[i]); - if (d[i] >= ' ' && d[i] <= '~') { - ascii[i % 16] = d[i]; - } else { - ascii[i % 16] = '.'; - } - if ((i+1) % 8 == 0 || i+1 == size) { - printf(" "); - if ((i+1) % 16 == 0) { - printf("| %s \n", ascii); - } else if (i+1 == size) { - ascii[(i+1) % 16] = '\0'; - if ((i+1) % 16 <= 8) { - printf(" "); - } - for (size_t j = (i+1) % 16; j < 16; j++) { - printf(" "); - } - printf("| %s \n", ascii); - } - } - } -} diff --git a/fusee/fusee-secondary/src/utils.h b/fusee/fusee-secondary/src/utils.h index d90b9cd99..846d7da9c 100644 --- a/fusee/fusee-secondary/src/utils.h +++ b/fusee/fusee-secondary/src/utils.h @@ -118,8 +118,6 @@ static inline bool check_32bit_address_range_in_program(uintptr_t addr, size_t s overlaps_a(start, end, __start__, __end__); } -void hexdump(const void* data, size_t size, uintptr_t addrbase); - __attribute__((noreturn)) void watchdog_reboot(void); __attribute__((noreturn)) void pmc_reboot(uint32_t scratch0); __attribute__((noreturn)) void reboot_to_fusee_primary(void); @@ -129,8 +127,6 @@ __attribute__((noreturn)) void wait_for_button_and_reboot(void); void wait_for_button(void); __attribute__((noreturn)) void generic_panic(void); - __attribute__((noreturn)) void fatal_error(const char *fmt, ...); - #endif diff --git a/libraries/.gitrepo b/libraries/.gitrepo index 08a97c558..c5fbf5ca1 100644 --- a/libraries/.gitrepo +++ b/libraries/.gitrepo @@ -6,7 +6,7 @@ [subrepo] remote = https://github.com/Atmosphere-NX/Atmosphere-libs branch = master - commit = cf8f0c3c1f006e07c0b3976908220d3e7e83f7fa - parent = 033ae1dbe09ba354849caf90ca2a2f114d9b3b4b + commit = cac5957d3f4b1417cf76a83cf704a14a254dd4dc + parent = 3726def6ecc547e64912ddb050737ebd296366e7 method = merge cmdver = 0.4.1 diff --git a/libraries/Makefile b/libraries/Makefile index 9ed0e10e4..55a46de01 100644 --- a/libraries/Makefile +++ b/libraries/Makefile @@ -1,4 +1,4 @@ -ATMOSPHERE_LIBRARIES := libmesosphere libstratosphere +ATMOSPHERE_LIBRARIES := libmesosphere libstratosphere libexosphere TOPTARGETS := all clean diff --git a/libraries/config/common.mk b/libraries/config/common.mk index ebe9bcfb8..6c5516238 100644 --- a/libraries/config/common.mk +++ b/libraries/config/common.mk @@ -17,8 +17,10 @@ endif export ATMOSPHERE_DEFINES := -DATMOSPHERE export ATMOSPHERE_SETTINGS := -fPIE -g -export ATMOSPHERE_CFLAGS := -Wall -ffunction-sections -fdata-sections -fno-strict-aliasing -fwrapv \ - -fno-asynchronous-unwind-tables -fno-unwind-tables -fno-stack-protector +export ATMOSPHERE_CFLAGS := -Wall -ffunction-sections -fdata-sections -fno-strict-aliasing -fwrapv \ + -fno-asynchronous-unwind-tables -fno-unwind-tables -fno-stack-protector \ + -Wno-format-truncation -Wno-format-zero-length -Wno-stringop-truncation + export ATMOSPHERE_CXXFLAGS := -fno-rtti -fno-exceptions -std=gnu++20 export ATMOSPHERE_ASFLAGS := diff --git a/libraries/config/templates/mesosphere.mk b/libraries/config/templates/mesosphere.mk index a8a36c9e6..98dcf1235 100644 --- a/libraries/config/templates/mesosphere.mk +++ 
b/libraries/config/templates/mesosphere.mk @@ -12,7 +12,7 @@ export CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit export ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) -export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) -Wl,-z,relro,-z,now +export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -fno-asynchronous-unwind-tables -fno-unwind-tables -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) -Wl,-z,relro,-z,now export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \ -Wl,--wrap,__cxa_throw \ diff --git a/libraries/libexosphere/include/exosphere/pkg1/pkg1_se_key_slots.hpp b/libraries/libexosphere/include/exosphere/pkg1/pkg1_se_key_slots.hpp index ead3d4124..659691397 100644 --- a/libraries/libexosphere/include/exosphere/pkg1/pkg1_se_key_slots.hpp +++ b/libraries/libexosphere/include/exosphere/pkg1/pkg1_se_key_slots.hpp @@ -19,34 +19,40 @@ namespace ams::pkg1 { enum AesKeySlot { - AesKeySlot_UserStart = 0, + AesKeySlot_UserStart = 0, - AesKeySlot_TzramSaveKek = 2, - AesKeySlot_TzramSaveKey = 3, + AesKeySlot_TzramSaveKek = 2, + AesKeySlot_TzramSaveKey = 3, - AesKeySlot_UserLast = 5, - AesKeySlot_UserEnd = AesKeySlot_UserLast + 1, + AesKeySlot_UserLast = 5, + AesKeySlot_UserEnd = AesKeySlot_UserLast + 1, - AesKeySlot_SecmonStart = 8, + AesKeySlot_SecmonStart = 8, - AesKeySlot_Temporary = 8, - AesKeySlot_Smc = 9, - AesKeySlot_RandomForUserWrap = 10, - AesKeySlot_RandomForKeyStorageWrap = 11, - AesKeySlot_DeviceMaster = 12, - AesKeySlot_Master = 13, - AesKeySlot_Device = 15, + AesKeySlot_Temporary = 8, + AesKeySlot_Smc = 9, + AesKeySlot_RandomForUserWrap = 10, + AesKeySlot_RandomForKeyStorageWrap = 11, + AesKeySlot_DeviceMaster = 12, + AesKeySlot_Master = 13, + AesKeySlot_Device = 15, - AesKeySlot_SecmonEnd = 16, + AesKeySlot_Count = 16, + AesKeySlot_SecmonEnd = AesKeySlot_Count, /* Used only during boot. */ - AesKeySlot_Tsec = 12, - AesKeySlot_TsecRoot = 13, - AesKeySlot_SecureBoot = 14, - AesKeySlot_SecureStorage = 15, + AesKeySlot_Tsec = 12, + AesKeySlot_TsecRoot = 13, + AesKeySlot_SecureBoot = 14, + AesKeySlot_SecureStorage = 15, - AesKeySlot_MasterKek = 13, - AesKeySlot_DeviceMasterKeySourceKek = 14, + AesKeySlot_DeviceMasterKeySourceKekErista = 10, + AesKeySlot_MasterKek = 13, + AesKeySlot_DeviceMasterKeySourceKekMariko = 14, + + /* Mariko only keyslots, used during boot. */ + AesKeySlot_MarikoKek = 12, + AesKeySlot_MarikoBek = 13, }; enum RsaKeySlot { diff --git a/libraries/libexosphere/include/exosphere/se/se_aes.hpp b/libraries/libexosphere/include/exosphere/se/se_aes.hpp index d3f18d20c..b5a828648 100644 --- a/libraries/libexosphere/include/exosphere/se/se_aes.hpp +++ b/libraries/libexosphere/include/exosphere/se/se_aes.hpp @@ -26,6 +26,9 @@ namespace ams::se { void ClearAesKeyIv(int slot); void LockAesKeySlot(int slot, u32 flags); + /* NOTE: This is Nintendo's API, but if we actually want to use SE2 we should use a different one. 
*/ + void ClearAesKeySlot2(int slot); + void SetAesKey(int slot, const void *key, size_t key_size); void SetEncryptedAesKey128(int dst_slot, int kek_slot, const void *key, size_t key_size); diff --git a/libraries/libexosphere/include/exosphere/se/se_management.hpp b/libraries/libexosphere/include/exosphere/se/se_management.hpp index 41469f594..843de190c 100644 --- a/libraries/libexosphere/include/exosphere/se/se_management.hpp +++ b/libraries/libexosphere/include/exosphere/se/se_management.hpp @@ -18,13 +18,14 @@ namespace ams::se { - void SetRegisterAddress(uintptr_t address); + void SetRegisterAddress(uintptr_t address, uintptr_t address2); void Initialize(); void SetSecure(bool secure); void SetTzramSecure(); void SetPerKeySecure(); + void SetContextSaveSecure(); void Lockout(); diff --git a/libraries/libexosphere/include/exosphere/se/se_suspend.hpp b/libraries/libexosphere/include/exosphere/se/se_suspend.hpp index 33cc7a15a..17f20a1c4 100644 --- a/libraries/libexosphere/include/exosphere/se/se_suspend.hpp +++ b/libraries/libexosphere/include/exosphere/se/se_suspend.hpp @@ -53,4 +53,8 @@ namespace ams::se { bool ValidateStickyBits(const StickyBits &bits); void SaveContext(Context *dst); + void ConfigureAutomaticContextSave(); + void SaveContextAutomatic(); + void SaveTzramAutomatic(); + } diff --git a/libraries/libexosphere/include/exosphere/tegra/tegra_ahb_arbc.hpp b/libraries/libexosphere/include/exosphere/tegra/tegra_ahb_arbc.hpp index d8a5d4321..b130ada97 100644 --- a/libraries/libexosphere/include/exosphere/tegra/tegra_ahb_arbc.hpp +++ b/libraries/libexosphere/include/exosphere/tegra/tegra_ahb_arbc.hpp @@ -23,6 +23,7 @@ #define AHB_MASTER_SWID (0x018) #define AHB_MASTER_SWID_1 (0x038) #define AHB_GIZMO_TZRAM (0x054) +#define AHB_AHB_SPARE_REG (0x110) #define AHB_REG_BITS_MASK(NAME) REG_NAMED_BITS_MASK (AHB_, NAME) #define AHB_REG_BITS_VALUE(NAME, VALUE) REG_NAMED_BITS_VALUE (AHB_, NAME, VALUE) @@ -39,3 +40,8 @@ DEFINE_AHB_REG_BIT_ENUM(ARBITRATION_DISABLE_COP, 1, ENABLE, DISABLE); DEFINE_AHB_REG_BIT_ENUM(ARBITRATION_DISABLE_AHBDMA, 5, ENABLE, DISABLE); DEFINE_AHB_REG_BIT_ENUM(ARBITRATION_DISABLE_USB, 6, ENABLE, DISABLE); DEFINE_AHB_REG_BIT_ENUM(ARBITRATION_DISABLE_USB2, 18, ENABLE, DISABLE); + +DEFINE_AHB_REG(AHB_SPARE_REG_CSITE_PADMACRO3_TRIM_SEL, 0, 5); +DEFINE_AHB_REG_BIT_ENUM(AHB_SPARE_REG_OBS_OVERRIDE_EN, 5, DISABLE, ENABLE); +DEFINE_AHB_REG_BIT_ENUM(AHB_SPARE_REG_APB2JTAG_OVERRIDE_EN, 6, DISABLE, ENABLE); +DEFINE_AHB_REG(AHB_SPARE_REG_AHB_SPARE_REG, 12, 32-12); diff --git a/libraries/libexosphere/include/exosphere/tegra/tegra_pmc.hpp b/libraries/libexosphere/include/exosphere/tegra/tegra_pmc.hpp index 6934c8ccb..ecd8d1860 100644 --- a/libraries/libexosphere/include/exosphere/tegra/tegra_pmc.hpp +++ b/libraries/libexosphere/include/exosphere/tegra/tegra_pmc.hpp @@ -114,6 +114,7 @@ #define APBDEV_PMC_SECURE_SCRATCH113 (0xB1C) #define APBDEV_PMC_SECURE_SCRATCH114 (0xB20) #define APBDEV_PMC_SECURE_SCRATCH115 (0xB24) +#define APBDEV_PMC_SECURE_SCRATCH119 (0xB34) #define PMC_REG_BITS_MASK(NAME) REG_NAMED_BITS_MASK (APBDEV_PMC, NAME) diff --git a/libraries/libexosphere/source/clkrst/clkrst_api.cpp b/libraries/libexosphere/source/clkrst/clkrst_api.cpp index c0ae50b25..8b3967804 100644 --- a/libraries/libexosphere/source/clkrst/clkrst_api.cpp +++ b/libraries/libexosphere/source/clkrst/clkrst_api.cpp @@ -38,7 +38,7 @@ namespace ams::clkrst { reg::ReadWrite(g_register_address + param.clk_enb_offset, REG_BITS_VALUE(param.index, 1, 0)); /* Set the clock source. 
*/ - if (param.clk_src != 0) { + if (param.clk_src_offset != 0) { reg::Write(g_register_address + param.clk_src_offset, (param.clk_src << 29) | (param.clk_div << 0)); } @@ -89,11 +89,11 @@ namespace ams::clkrst { } void EnableUartBClock() { - EnableClock(UartAClock); + EnableClock(UartBClock); } void EnableUartCClock() { - EnableClock(UartAClock); + EnableClock(UartCClock); } void EnableActmonClock() { diff --git a/libraries/libexosphere/source/fuse/fuse_api.cpp b/libraries/libexosphere/source/fuse/fuse_api.cpp index 7265e42d9..17e4b2564 100644 --- a/libraries/libexosphere/source/fuse/fuse_api.cpp +++ b/libraries/libexosphere/source/fuse/fuse_api.cpp @@ -20,6 +20,10 @@ namespace ams::fuse { namespace { + static constexpr SocType SocType_CommonInternal = static_cast(-1); + static_assert(SocType_CommonInternal != SocType_Erista); + static_assert(SocType_CommonInternal != SocType_Mariko); + struct BypassEntry { u32 offset; u32 value; @@ -42,6 +46,11 @@ namespace ams::fuse { using HardwareType3 = util::BitPack32::Field; }; + struct OdmWord28 { + using Regulator = util::BitPack32::Field<0, 1, int>; + using Reserved = util::BitPack32::Field<1, 31, int>; + }; + constexpr ALWAYS_INLINE int GetHardwareStateValue(const util::BitPack32 odm_word4) { constexpr auto HardwareState1Shift = 0; constexpr auto HardwareState2Shift = OdmWord4::HardwareState1::Count + HardwareState1Shift; @@ -73,8 +82,16 @@ namespace ams::fuse { return GetRegisterRegion()->fuse; } - ALWAYS_INLINE volatile FuseChipRegisters &GetChipRegisters() { - return GetRegisterRegion()->chip; + ALWAYS_INLINE volatile FuseChipRegistersCommon &GetChipRegistersCommon() { + return GetRegisterRegion()->chip_common; + } + + ALWAYS_INLINE volatile FuseChipRegistersErista &GetChipRegistersErista() { + return GetRegisterRegion()->chip_erista; + } + + ALWAYS_INLINE volatile FuseChipRegistersMariko &GetChipRegistersMariko() { + return GetRegisterRegion()->chip_mariko; } bool IsIdle() { @@ -85,6 +102,31 @@ namespace ams::fuse { while (!IsIdle()) { /* ... */ } } + u32 GetOdmWordImpl(int index, fuse::SocType soc_type) { + if (index < 8) { + volatile auto &chip = GetChipRegistersCommon(); + return chip.FUSE_RESERVED_ODM_0[index - 0]; + } else if (soc_type == SocType_Mariko) { + volatile auto &chip = GetChipRegistersMariko(); + if (index < 22) { + return chip.FUSE_RESERVED_ODM_8[index - 8]; + } else if (index < 25) { + return chip.FUSE_RESERVED_ODM_22[index - 22]; + } else if (index < 26) { + return chip.FUSE_RESERVED_ODM_25[index - 25]; + } else if (index < 29) { + return chip.FUSE_RESERVED_ODM_26[index - 26]; + } else if (index < 30) { + return chip.FUSE_RESERVED_ODM_29[index - 29]; + } + } + AMS_ABORT("Invalid ODM fuse read"); + } + + u32 GetCommonOdmWord(int index) { + return GetOdmWordImpl(index, SocType_CommonInternal); + } + bool IsNewFuseFormat() { /* On mariko, this should always be true. */ if (GetSocType() != SocType_Erista) { @@ -92,7 +134,7 @@ namespace ams::fuse { } /* Require that the format version be non-zero in odm4. 
*/ - if (util::BitPack32{GetOdmWord(4)}.Get() == 0) { + if (util::BitPack32{GetCommonOdmWord(4)}.Get() == 0) { return false; } @@ -100,8 +142,8 @@ namespace ams::fuse { constexpr u32 NewFuseFormatMagic0 = 0x8E61ECAE; constexpr u32 NewFuseFormatMagic1 = 0xF2BA3BB2; - const u32 w0 = GetOdmWord(0); - const u32 w1 = GetOdmWord(1); + const u32 w0 = GetCommonOdmWord(0); + const u32 w1 = GetCommonOdmWord(1); return w0 == NewFuseFormatMagic0 && w1 == NewFuseFormatMagic1; } @@ -206,12 +248,12 @@ namespace ams::fuse { } u32 GetOdmWord(int index) { - return GetChipRegisters().FUSE_RESERVED_ODM[index]; + return GetOdmWordImpl(index, GetSocType()); } void GetEcid(br::BootEcid *out) { /* Get the registers. */ - volatile auto &chip = GetChipRegisters(); + volatile auto &chip = GetChipRegistersCommon(); /* Read the ecid components. */ const u32 vendor = reg::Read(chip.FUSE_OPT_VENDOR_CODE) & ((1u << 4) - 1); @@ -235,7 +277,7 @@ namespace ams::fuse { u64 GetDeviceId() { /* Get the registers. */ - volatile auto &chip = GetChipRegisters(); + volatile auto &chip = GetChipRegistersCommon(); /* Read the device id components. */ /* NOTE: Device ID is "basically" just an alternate encoding of Ecid. */ @@ -258,12 +300,12 @@ namespace ams::fuse { } DramId GetDramId() { - return static_cast(util::BitPack32{GetOdmWord(4)}.Get()); + return static_cast(util::BitPack32{GetCommonOdmWord(4)}.Get()); } HardwareType GetHardwareType() { /* Read the odm word. */ - const util::BitPack32 odm_word4 = { GetOdmWord(4) }; + const util::BitPack32 odm_word4 = { GetCommonOdmWord(4) }; /* Get the value. */ const auto value = GetHardwareTypeValue(odm_word4); @@ -280,7 +322,7 @@ namespace ams::fuse { HardwareState GetHardwareState() { /* Read the odm word. */ - const util::BitPack32 odm_word4 = { GetOdmWord(4) }; + const util::BitPack32 odm_word4 = { GetCommonOdmWord(4) }; /* Get the value. */ const auto value = GetHardwareStateValue(odm_word4); @@ -293,22 +335,28 @@ namespace ams::fuse { } PatchVersion GetPatchVersion() { - const auto patch_version = reg::Read(GetChipRegisters().FUSE_SOC_SPEEDO_1_CALIB); + const auto patch_version = reg::Read(GetChipRegistersCommon().FUSE_SOC_SPEEDO_1_CALIB); return static_cast(static_cast(GetSocType() << 12) | patch_version); } QuestState GetQuestState() { - return static_cast(util::BitPack32{GetOdmWord(4)}.Get()); + return static_cast(util::BitPack32{GetCommonOdmWord(4)}.Get()); } pmic::Regulator GetRegulator() { - /* TODO: How should mariko be handled? This reads from ODM word 28 in fuses (not present in erista...). */ - return pmic::Regulator_Erista_Max77621; + if (GetSocType() == SocType_Mariko) { + /* Read the odm word. */ + const util::BitPack32 odm_word28 = { GetOdmWordImpl(28, SocType_Mariko) }; + + return static_cast(odm_word28.Get() + 1); + } else /* if (GetSocType() == SocType_Erista) */ { + return pmic::Regulator_Erista_Max77621; + } } int GetDeviceUniqueKeyGeneration() { if (IsNewFuseFormat()) { - return util::BitPack32{GetOdmWord(2)}.Get(); + return util::BitPack32{GetCommonOdmWord(2)}.Get(); } else { return 0; } @@ -344,13 +392,13 @@ namespace ams::fuse { } /* Some patched units use XUSB in RCM. */ - if (reg::Read(GetChipRegisters().FUSE_RESERVED_SW) & 0x80) { + if (reg::Read(GetChipRegistersCommon().FUSE_RESERVED_SW) & 0x80) { g_has_rcm_bug_patch = true; break; } /* Other units have a proper ipatch instead. 
*/ - u32 word_count = reg::Read(GetChipRegisters().FUSE_FIRST_BOOTROM_PATCH_SIZE) & 0x7F; + u32 word_count = reg::Read(GetChipRegistersCommon().FUSE_FIRST_BOOTROM_PATCH_SIZE) & 0x7F; u32 word_addr = 191; while (word_count && !g_has_rcm_bug_patch) { @@ -379,7 +427,7 @@ namespace ams::fuse { } bool IsOdmProductionMode() { - return reg::HasValue(GetChipRegisters().FUSE_SECURITY_MODE, FUSE_REG_BITS_ENUM(SECURITY_MODE_SECURITY_MODE, ENABLED)); + return reg::HasValue(GetChipRegistersCommon().FUSE_SECURITY_MODE, FUSE_REG_BITS_ENUM(SECURITY_MODE_SECURITY_MODE, ENABLED)); } void ConfigureFuseBypass() { diff --git a/libraries/libexosphere/source/fuse/fuse_registers.hpp b/libraries/libexosphere/source/fuse/fuse_registers.hpp index 3d729c9b6..19542b0b0 100644 --- a/libraries/libexosphere/source/fuse/fuse_registers.hpp +++ b/libraries/libexosphere/source/fuse/fuse_registers.hpp @@ -42,12 +42,162 @@ namespace ams::fuse { u32 FUSE_PRIVATE_KEY2_NONZERO; u32 FUSE_PRIVATE_KEY3_NONZERO; u32 FUSE_PRIVATE_KEY4_NONZERO; - u32 _0x94[0x1B]; + u32 _0x94; }; static_assert(util::is_pod::value); - static_assert(sizeof(FuseRegisters) == 0x100); + static_assert(sizeof(FuseRegisters) == 0x98); - struct FuseChipRegisters { + struct FuseChipRegistersCommon { + u32 _0x98[0x1A]; + u32 FUSE_PRODUCTION_MODE; + u32 FUSE_JTAG_SECUREID_VALID; + u32 FUSE_ODM_LOCK; + u32 FUSE_OPT_OPENGL_EN; + u32 FUSE_SKU_INFO; + u32 FUSE_CPU_SPEEDO_0_CALIB; + u32 FUSE_CPU_IDDQ_CALIB; + u32 _0x11C; + u32 _0x120; + u32 _0x124; + u32 FUSE_OPT_FT_REV; + u32 FUSE_CPU_SPEEDO_1_CALIB; + u32 FUSE_CPU_SPEEDO_2_CALIB; + u32 FUSE_SOC_SPEEDO_0_CALIB; + u32 FUSE_SOC_SPEEDO_1_CALIB; + u32 FUSE_SOC_SPEEDO_2_CALIB; + u32 FUSE_SOC_IDDQ_CALIB; + u32 _0x144; + u32 FUSE_FA; + u32 FUSE_RESERVED_PRODUCTION; + u32 FUSE_HDMI_LANE0_CALIB; + u32 FUSE_HDMI_LANE1_CALIB; + u32 FUSE_HDMI_LANE2_CALIB; + u32 FUSE_HDMI_LANE3_CALIB; + u32 FUSE_ENCRYPTION_RATE; + u32 FUSE_PUBLIC_KEY[0x8]; + u32 FUSE_TSENSOR1_CALIB; + u32 FUSE_TSENSOR2_CALIB; + u32 _0x18C; + u32 FUSE_OPT_CP_REV; + u32 FUSE_OPT_PFG; + u32 FUSE_TSENSOR0_CALIB; + u32 FUSE_FIRST_BOOTROM_PATCH_SIZE; + u32 FUSE_SECURITY_MODE; + u32 FUSE_PRIVATE_KEY[0x5]; + u32 FUSE_ARM_JTAG_DIS; + u32 FUSE_BOOT_DEVICE_INFO; + u32 FUSE_RESERVED_SW; + u32 FUSE_OPT_VP9_DISABLE; + u32 FUSE_RESERVED_ODM_0[8 - 0]; + u32 FUSE_OBS_DIS; + u32 _0x1EC; + u32 FUSE_USB_CALIB; + u32 FUSE_SKU_DIRECT_CONFIG; + u32 FUSE_KFUSE_PRIVKEY_CTRL; + u32 FUSE_PACKAGE_INFO; + u32 FUSE_OPT_VENDOR_CODE; + u32 FUSE_OPT_FAB_CODE; + u32 FUSE_OPT_LOT_CODE_0; + u32 FUSE_OPT_LOT_CODE_1; + u32 FUSE_OPT_WAFER_ID; + u32 FUSE_OPT_X_COORDINATE; + u32 FUSE_OPT_Y_COORDINATE; + u32 FUSE_OPT_SEC_DEBUG_EN; + u32 FUSE_OPT_OPS_RESERVED; + u32 _0x224; + u32 FUSE_GPU_IDDQ_CALIB; + u32 FUSE_TSENSOR3_CALIB; + u32 _0x234; + u32 _0x238; + u32 _0x23C; + u32 _0x240; + u32 _0x244; + u32 FUSE_OPT_SAMPLE_TYPE; + u32 FUSE_OPT_SUBREVISION; + u32 FUSE_OPT_SW_RESERVED_0; + u32 FUSE_OPT_SW_RESERVED_1; + u32 FUSE_TSENSOR4_CALIB; + u32 FUSE_TSENSOR5_CALIB; + u32 FUSE_TSENSOR6_CALIB; + u32 FUSE_TSENSOR7_CALIB; + u32 FUSE_OPT_PRIV_SEC_EN; + u32 _0x268; + u32 _0x26C; + u32 _0x270; + u32 _0x274; + u32 _0x278; + u32 FUSE_FUSE2TSEC_DEBUG_DISABLE; + u32 FUSE_TSENSOR_COMMON; + u32 FUSE_OPT_CP_BIN; + u32 FUSE_OPT_GPU_DISABLE; + u32 FUSE_OPT_FT_BIN; + u32 FUSE_OPT_DONE_MAP; + u32 _0x294; + u32 FUSE_APB2JTAG_DISABLE; + u32 FUSE_ODM_INFO; + u32 _0x2A0; + u32 _0x2A4; + u32 FUSE_ARM_CRYPT_DE_FEATURE; + u32 _0x2AC; + u32 _0x2B0; + u32 _0x2B4; + u32 _0x2B8; + u32 _0x2BC; + u32 FUSE_WOA_SKU_FLAG; + u32 
FUSE_ECO_RESERVE_1; + u32 FUSE_GCPLEX_CONFIG_FUSE; + u32 FUSE_PRODUCTION_MONTH; + u32 FUSE_RAM_REPAIR_INDICATOR; + u32 FUSE_TSENSOR9_CALIB; + u32 _0x2D8; + u32 FUSE_VMIN_CALIBRATION; + u32 FUSE_AGING_SENSOR_CALIBRATION; + u32 FUSE_DEBUG_AUTHENTICATION; + u32 FUSE_SECURE_PROVISION_INDEX; + u32 FUSE_SECURE_PROVISION_INFO; + u32 FUSE_OPT_GPU_DISABLE_CP1; + u32 FUSE_SPARE_ENDIS; + u32 FUSE_ECO_RESERVE_0; + u32 _0x2FC; + u32 _0x300; + u32 FUSE_RESERVED_CALIB0; + u32 FUSE_RESERVED_CALIB1; + u32 FUSE_OPT_GPU_TPC0_DISABLE; + u32 FUSE_OPT_GPU_TPC0_DISABLE_CP1; + u32 FUSE_OPT_CPU_DISABLE; + u32 FUSE_OPT_CPU_DISABLE_CP1; + u32 FUSE_TSENSOR10_CALIB; + u32 FUSE_TSENSOR10_CALIB_AUX; + u32 _0x324; + u32 _0x328; + u32 _0x32C; + u32 _0x330; + u32 _0x334; + u32 FUSE_OPT_GPU_TPC0_DISABLE_CP2; + u32 FUSE_OPT_GPU_TPC1_DISABLE; + u32 FUSE_OPT_GPU_TPC1_DISABLE_CP1; + u32 FUSE_OPT_GPU_TPC1_DISABLE_CP2; + u32 FUSE_OPT_CPU_DISABLE_CP2; + u32 FUSE_OPT_GPU_DISABLE_CP2; + u32 FUSE_USB_CALIB_EXT; + u32 FUSE_RESERVED_FIELD; + u32 _0x358; + u32 _0x35C; + u32 _0x360; + u32 _0x364; + u32 _0x368; + u32 _0x36C; + u32 _0x370; + u32 _0x374; + u32 _0x378; + u32 FUSE_SPARE_REALIGNMENT_REG; + u32 FUSE_SPARE_BIT[0x20]; + }; + static_assert(util::is_pod::value); + static_assert(sizeof(FuseChipRegistersCommon) == 0x400 - 0x98); + + struct FuseChipRegistersErista { + u32 _0x98[0x1A]; u32 FUSE_PRODUCTION_MODE; u32 FUSE_JTAG_SECUREID_VALID; u32 FUSE_ODM_LOCK; @@ -87,7 +237,7 @@ namespace ams::fuse { u32 FUSE_BOOT_DEVICE_INFO; u32 FUSE_RESERVED_SW; u32 FUSE_OPT_VP9_DISABLE; - u32 FUSE_RESERVED_ODM[0x8]; + u32 FUSE_RESERVED_ODM_0[8 - 0]; u32 FUSE_OBS_DIS; u32 FUSE_NOR_INFO; u32 FUSE_USB_CALIB; @@ -121,34 +271,34 @@ namespace ams::fuse { u32 FUSE_TSENSOR7_CALIB; u32 FUSE_OPT_PRIV_SEC_EN; u32 FUSE_PKC_DISABLE; - u32 _0x16C; - u32 _0x170; - u32 _0x174; - u32 _0x178; + u32 _0x26C; + u32 _0x270; + u32 _0x274; + u32 _0x278; u32 FUSE_FUSE2TSEC_DEBUG_DISABLE; u32 FUSE_TSENSOR_COMMON; u32 FUSE_OPT_CP_BIN; u32 FUSE_OPT_GPU_DISABLE; u32 FUSE_OPT_FT_BIN; u32 FUSE_OPT_DONE_MAP; - u32 _0x194; + u32 _0x294; u32 FUSE_APB2JTAG_DISABLE; u32 FUSE_ODM_INFO; - u32 _0x1A0; - u32 _0x1A4; + u32 _0x2A0; + u32 _0x2A4; u32 FUSE_ARM_CRYPT_DE_FEATURE; - u32 _0x1AC; - u32 _0x1B0; - u32 _0x1B4; - u32 _0x1B8; - u32 _0x1BC; + u32 _0x2AC; + u32 _0x2B0; + u32 _0x2B4; + u32 _0x2B8; + u32 _0x2BC; u32 FUSE_WOA_SKU_FLAG; u32 FUSE_ECO_RESERVE_1; u32 FUSE_GCPLEX_CONFIG_FUSE; u32 FUSE_PRODUCTION_MONTH; u32 FUSE_RAM_REPAIR_INDICATOR; u32 FUSE_TSENSOR9_CALIB; - u32 _0x1D8; + u32 _0x2D8; u32 FUSE_VMIN_CALIBRATION; u32 FUSE_AGING_SENSOR_CALIBRATION; u32 FUSE_DEBUG_AUTHENTICATION; @@ -157,8 +307,8 @@ namespace ams::fuse { u32 FUSE_OPT_GPU_DISABLE_CP1; u32 FUSE_SPARE_ENDIS; u32 FUSE_ECO_RESERVE_0; - u32 _0x1FC; - u32 _0x200; + u32 _0x2FC; + u32 _0x300; u32 FUSE_RESERVED_CALIB0; u32 FUSE_RESERVED_CALIB1; u32 FUSE_OPT_GPU_TPC0_DISABLE; @@ -181,23 +331,175 @@ namespace ams::fuse { u32 FUSE_USB_CALIB_EXT; u32 FUSE_RESERVED_FIELD; u32 FUSE_OPT_ECC_EN; - u32 _0x25C; - u32 _0x260; - u32 _0x264; - u32 _0x268; + u32 _0x35C; + u32 _0x360; + u32 _0x364; + u32 _0x368; + u32 _0x36C; + u32 _0x370; + u32 _0x374; + u32 _0x378; + u32 FUSE_SPARE_REALIGNMENT_REG; + u32 FUSE_SPARE_BIT[0x20]; + }; + static_assert(util::is_pod::value); + static_assert(sizeof(FuseChipRegistersErista) == 0x400 - 0x98); + + struct FuseChipRegistersMariko { + u32 FUSE_RESERVED_ODM_8[22 - 8]; + u32 FUSE_KEK[4]; + u32 FUSE_BEK[4]; + u32 _0xF0[4]; + u32 FUSE_PRODUCTION_MODE; + u32 FUSE_JTAG_SECUREID_VALID; + u32 FUSE_ODM_LOCK; + 
u32 FUSE_OPT_OPENGL_EN; + u32 FUSE_SKU_INFO; + u32 FUSE_CPU_SPEEDO_0_CALIB; + u32 FUSE_CPU_IDDQ_CALIB; + u32 FUSE_RESERVED_ODM_22[25 - 22]; + u32 FUSE_OPT_FT_REV; + u32 FUSE_CPU_SPEEDO_1_CALIB; + u32 FUSE_CPU_SPEEDO_2_CALIB; + u32 FUSE_SOC_SPEEDO_0_CALIB; + u32 FUSE_SOC_SPEEDO_1_CALIB; + u32 FUSE_SOC_SPEEDO_2_CALIB; + u32 FUSE_SOC_IDDQ_CALIB; + u32 FUSE_RESERVED_ODM_25[26 - 25]; + u32 FUSE_FA; + u32 FUSE_RESERVED_PRODUCTION; + u32 FUSE_HDMI_LANE0_CALIB; + u32 FUSE_HDMI_LANE1_CALIB; + u32 FUSE_HDMI_LANE2_CALIB; + u32 FUSE_HDMI_LANE3_CALIB; + u32 FUSE_ENCRYPTION_RATE; + u32 FUSE_PUBLIC_KEY[0x8]; + u32 FUSE_TSENSOR1_CALIB; + u32 FUSE_TSENSOR2_CALIB; + u32 FUSE_OPT_SECURE_SCC_DIS; + u32 FUSE_OPT_CP_REV; + u32 FUSE_OPT_PFG; + u32 FUSE_TSENSOR0_CALIB; + u32 FUSE_FIRST_BOOTROM_PATCH_SIZE; + u32 FUSE_SECURITY_MODE; + u32 FUSE_PRIVATE_KEY[0x5]; + u32 FUSE_ARM_JTAG_DIS; + u32 FUSE_BOOT_DEVICE_INFO; + u32 FUSE_RESERVED_SW; + u32 FUSE_OPT_VP9_DISABLE; + u32 FUSE_RESERVED_ODM_0[8 - 0]; + u32 FUSE_OBS_DIS; + u32 _0x1EC; + u32 FUSE_USB_CALIB; + u32 FUSE_SKU_DIRECT_CONFIG; + u32 FUSE_KFUSE_PRIVKEY_CTRL; + u32 FUSE_PACKAGE_INFO; + u32 FUSE_OPT_VENDOR_CODE; + u32 FUSE_OPT_FAB_CODE; + u32 FUSE_OPT_LOT_CODE_0; + u32 FUSE_OPT_LOT_CODE_1; + u32 FUSE_OPT_WAFER_ID; + u32 FUSE_OPT_X_COORDINATE; + u32 FUSE_OPT_Y_COORDINATE; + u32 FUSE_OPT_SEC_DEBUG_EN; + u32 FUSE_OPT_OPS_RESERVED; + u32 _0x224; + u32 FUSE_GPU_IDDQ_CALIB; + u32 FUSE_TSENSOR3_CALIB; + u32 FUSE_CLOCK_BONDOUT0; + u32 FUSE_CLOCK_BONDOUT1; + u32 FUSE_RESERVED_ODM_26[29 - 26]; + u32 FUSE_OPT_SAMPLE_TYPE; + u32 FUSE_OPT_SUBREVISION; + u32 FUSE_OPT_SW_RESERVED_0; + u32 FUSE_OPT_SW_RESERVED_1; + u32 FUSE_TSENSOR4_CALIB; + u32 FUSE_TSENSOR5_CALIB; + u32 FUSE_TSENSOR6_CALIB; + u32 FUSE_TSENSOR7_CALIB; + u32 FUSE_OPT_PRIV_SEC_EN; + u32 FUSE_BOOT_SECURITY_INFO; u32 _0x26C; u32 _0x270; u32 _0x274; u32 _0x278; + u32 FUSE_FUSE2TSEC_DEBUG_DISABLE; + u32 FUSE_TSENSOR_COMMON; + u32 FUSE_OPT_CP_BIN; + u32 FUSE_OPT_GPU_DISABLE; + u32 FUSE_OPT_FT_BIN; + u32 FUSE_OPT_DONE_MAP; + u32 FUSE_RESERVED_ODM_29[30 - 29]; + u32 FUSE_APB2JTAG_DISABLE; + u32 FUSE_ODM_INFO; + u32 _0x2A0; + u32 _0x2A4; + u32 FUSE_ARM_CRYPT_DE_FEATURE; + u32 _0x2AC; + u32 _0x2B0; + u32 _0x2B4; + u32 _0x2B8; + u32 _0x2BC; + u32 FUSE_WOA_SKU_FLAG; + u32 FUSE_ECO_RESERVE_1; + u32 FUSE_GCPLEX_CONFIG_FUSE; + u32 FUSE_PRODUCTION_MONTH; + u32 FUSE_RAM_REPAIR_INDICATOR; + u32 FUSE_TSENSOR9_CALIB; + u32 _0x2D8; + u32 FUSE_VMIN_CALIBRATION; + u32 FUSE_AGING_SENSOR_CALIBRATION; + u32 FUSE_DEBUG_AUTHENTICATION; + u32 FUSE_SECURE_PROVISION_INDEX; + u32 FUSE_SECURE_PROVISION_INFO; + u32 FUSE_OPT_GPU_DISABLE_CP1; + u32 FUSE_SPARE_ENDIS; + u32 FUSE_ECO_RESERVE_0; + u32 _0x2FC; + u32 _0x300; + u32 FUSE_RESERVED_CALIB0; + u32 FUSE_RESERVED_CALIB1; + u32 FUSE_OPT_GPU_TPC0_DISABLE; + u32 FUSE_OPT_GPU_TPC0_DISABLE_CP1; + u32 FUSE_OPT_CPU_DISABLE; + u32 FUSE_OPT_CPU_DISABLE_CP1; + u32 FUSE_TSENSOR10_CALIB; + u32 FUSE_TSENSOR10_CALIB_AUX; + u32 _0x324; + u32 _0x328; + u32 _0x32C; + u32 _0x330; + u32 _0x334; + u32 FUSE_OPT_GPU_TPC0_DISABLE_CP2; + u32 FUSE_OPT_GPU_TPC1_DISABLE; + u32 FUSE_OPT_GPU_TPC1_DISABLE_CP1; + u32 FUSE_OPT_GPU_TPC1_DISABLE_CP2; + u32 FUSE_OPT_CPU_DISABLE_CP2; + u32 FUSE_OPT_GPU_DISABLE_CP2; + u32 FUSE_USB_CALIB_EXT; + u32 FUSE_RESERVED_FIELD; + u32 _0x358; + u32 _0x35C; + u32 _0x360; + u32 _0x364; + u32 _0x368; + u32 _0x36C; + u32 _0x370; + u32 _0x374; + u32 _0x378; u32 FUSE_SPARE_REALIGNMENT_REG; u32 FUSE_SPARE_BIT[0x20]; }; - static_assert(util::is_pod::value); - static_assert(sizeof(FuseChipRegisters) 
== 0x300); + static_assert(util::is_pod::value); + static_assert(sizeof(FuseChipRegistersMariko) == 0x400 - 0x98); struct FuseRegisterRegion { FuseRegisters fuse; - FuseChipRegisters chip; + union { + FuseChipRegistersCommon chip_common; + FuseChipRegistersErista chip_erista; + FuseChipRegistersMariko chip_mariko; + }; }; static_assert(util::is_pod::value); static_assert(sizeof(FuseRegisterRegion) == secmon::MemoryRegionPhysicalDeviceFuses.GetSize()); diff --git a/libraries/libexosphere/source/log/log_api.cpp b/libraries/libexosphere/source/log/log_api.cpp index ad1d89ea2..b6c96ed02 100644 --- a/libraries/libexosphere/source/log/log_api.cpp +++ b/libraries/libexosphere/source/log/log_api.cpp @@ -48,12 +48,14 @@ namespace ams::log { clkrst::EnableUartAClock(); } else if constexpr (UartLogPort == uart::Port_LeftJoyCon) { /* Logging to left joy-con (e.g. with Joyless). */ - pinmux::SetupUartB(); - clkrst::EnableUartBClock(); - } else if constexpr (UartLogPort == uart::Port_RightJoyCon) { - /* Logging to right joy-con (e.g. with Joyless). */ + static_assert(uart::Port_LeftJoyCon == uart::Port_C); pinmux::SetupUartC(); clkrst::EnableUartCClock(); + } else if constexpr (UartLogPort == uart::Port_RightJoyCon) { + /* Logging to right joy-con (e.g. with Joyless). */ + static_assert(uart::Port_RightJoyCon == uart::Port_B); + pinmux::SetupUartB(); + clkrst::EnableUartBClock(); } else { __builtin_unreachable(); } diff --git a/libraries/libexosphere/source/pinmux/pinmux_api.cpp b/libraries/libexosphere/source/pinmux/pinmux_api.cpp index 276c9c8b8..3559541cd 100644 --- a/libraries/libexosphere/source/pinmux/pinmux_api.cpp +++ b/libraries/libexosphere/source/pinmux/pinmux_api.cpp @@ -104,7 +104,7 @@ namespace ams::pinmux { /* Get the registers. */ const uintptr_t PINMUX = g_pinmux_address; - /* Configure Uart-B. */ + /* Configure Uart-C. */ reg::Write(PINMUX + PINMUX_AUX_UART3_TX, PINMUX_REG_BITS_ENUM(AUX_UART3_PM, UARTC), PINMUX_REG_BITS_ENUM(AUX_PUPD, NONE), PINMUX_REG_BITS_ENUM(AUX_TRISTATE, PASSTHROUGH), @@ -114,13 +114,13 @@ namespace ams::pinmux { reg::Write(PINMUX + PINMUX_AUX_UART3_RX, PINMUX_REG_BITS_ENUM(AUX_UART3_PM, UARTC), PINMUX_REG_BITS_ENUM(AUX_PUPD, NONE), - PINMUX_REG_BITS_ENUM(AUX_TRISTATE, PASSTHROUGH), + PINMUX_REG_BITS_ENUM(AUX_TRISTATE, TRISTATE), PINMUX_REG_BITS_ENUM(AUX_E_INPUT, ENABLE), PINMUX_REG_BITS_ENUM(AUX_LOCK, DISABLE), PINMUX_REG_BITS_ENUM(AUX_E_OD, DISABLE)); reg::Write(PINMUX + PINMUX_AUX_UART3_RTS, PINMUX_REG_BITS_ENUM(AUX_UART3_PM, UARTC), - PINMUX_REG_BITS_ENUM(AUX_PUPD, NONE), + PINMUX_REG_BITS_ENUM(AUX_PUPD, PULL_DOWN), PINMUX_REG_BITS_ENUM(AUX_TRISTATE, PASSTHROUGH), PINMUX_REG_BITS_ENUM(AUX_E_INPUT, DISABLE), PINMUX_REG_BITS_ENUM(AUX_LOCK, DISABLE), @@ -128,13 +128,16 @@ namespace ams::pinmux { reg::Write(PINMUX + PINMUX_AUX_UART3_CTS, PINMUX_REG_BITS_ENUM(AUX_UART3_PM, UARTC), PINMUX_REG_BITS_ENUM(AUX_PUPD, NONE), - PINMUX_REG_BITS_ENUM(AUX_TRISTATE, PASSTHROUGH), + PINMUX_REG_BITS_ENUM(AUX_TRISTATE, TRISTATE), PINMUX_REG_BITS_ENUM(AUX_E_INPUT, ENABLE), PINMUX_REG_BITS_ENUM(AUX_LOCK, DISABLE), PINMUX_REG_BITS_ENUM(AUX_E_OD, DISABLE)); /* Configure GPIO for Uart-C. 
*/ - reg::ReadWrite(g_gpio_address + 0x00C, REG_BITS_VALUE(1, 4, 0)); + reg::ReadWrite(g_gpio_address + 0x118, REG_BITS_VALUE(0, 1, 1)); + reg::Read(g_gpio_address + 0x118); + reg::ReadWrite(g_gpio_address + 0x00C, REG_BITS_VALUE(1, 1, 0)); + reg::Read(g_gpio_address + 0x00C); } void SetupI2c1() { diff --git a/libraries/libexosphere/source/se/se_aes.cpp b/libraries/libexosphere/source/se/se_aes.cpp index 00105f71d..f5950552c 100644 --- a/libraries/libexosphere/source/se/se_aes.cpp +++ b/libraries/libexosphere/source/se/se_aes.cpp @@ -362,22 +362,29 @@ namespace ams::se { StartOperationRaw(SE, SE_OPERATION_OP_START, out_ll_address, in_ll_address); } + void ClearAesKeySlot(volatile SecurityEngineRegisters *SE, int slot) { + /* Validate the key slot. */ + AMS_ABORT_UNLESS(0 <= slot && slot < AesKeySlotCount); + + for (int i = 0; i < 16; ++i) { + /* Select the keyslot. */ + reg::Write(SE->SE_CRYPTO_KEYTABLE_ADDR, SE_REG_BITS_VALUE(CRYPTO_KEYTABLE_ADDR_KEYIV_KEY_SLOT, slot), SE_REG_BITS_VALUE(CRYPTO_KEYTABLE_ADDR_KEYIV_WORD, i)); + + /* Write the data. */ + SE->SE_CRYPTO_KEYTABLE_DATA = 0; + } + } + } void ClearAesKeySlot(int slot) { - /* Validate the key slot. */ - AMS_ABORT_UNLESS(0 <= slot && slot < AesKeySlotCount); + /* Clear the slot in SE1. */ + ClearAesKeySlot(GetRegisters(), slot); + } - /* Get the engine. */ - auto *SE = GetRegisters(); - - for (int i = 0; i < 16; ++i) { - /* Select the keyslot. */ - reg::Write(SE->SE_CRYPTO_KEYTABLE_ADDR, SE_REG_BITS_VALUE(CRYPTO_KEYTABLE_ADDR_KEYIV_KEY_SLOT, slot), SE_REG_BITS_VALUE(CRYPTO_KEYTABLE_ADDR_KEYIV_WORD, i)); - - /* Write the data. */ - SE->SE_CRYPTO_KEYTABLE_DATA = 0; - } + void ClearAesKeySlot2(int slot) { + /* Clear the slot in SE2. */ + ClearAesKeySlot(GetRegisters2(), slot); } void ClearAesKeyIv(int slot) { diff --git a/libraries/libexosphere/source/se/se_execute.hpp b/libraries/libexosphere/source/se/se_execute.hpp index cc28d3ebb..8c7e9292f 100644 --- a/libraries/libexosphere/source/se/se_execute.hpp +++ b/libraries/libexosphere/source/se/se_execute.hpp @@ -19,6 +19,7 @@ namespace ams::se { volatile SecurityEngineRegisters *GetRegisters(); + volatile SecurityEngineRegisters *GetRegisters2(); void ExecuteOperation(volatile SecurityEngineRegisters *SE, SE_OPERATION_OP op, void *dst, size_t dst_size, const void *src, size_t src_size); void ExecuteOperationSingleBlock(volatile SecurityEngineRegisters *SE, void *dst, size_t dst_size, const void *src, size_t src_size); diff --git a/libraries/libexosphere/source/se/se_management.cpp b/libraries/libexosphere/source/se/se_management.cpp index 0c577a01b..fe5c20de4 100644 --- a/libraries/libexosphere/source/se/se_management.cpp +++ b/libraries/libexosphere/source/se/se_management.cpp @@ -20,17 +20,35 @@ namespace ams::se { namespace { - constinit uintptr_t g_register_address = secmon::MemoryRegionPhysicalDeviceSecurityEngine.GetAddress(); + constinit uintptr_t g_register_address = secmon::MemoryRegionPhysicalDeviceSecurityEngine.GetAddress(); + constinit uintptr_t g_register2_address = secmon::MemoryRegionPhysicalDeviceSecurityEngine2.GetAddress(); constinit DoneHandler g_done_handler = nullptr; + void SetSecure(volatile SecurityEngineRegisters *SE, bool secure) { + /* Set the security software setting. */ + if (secure) { + reg::ReadWrite(SE->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_SOFT_SETTING, SECURE)); + } else { + reg::ReadWrite(SE->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_SOFT_SETTING, NONSECURE)); + } + + /* Read the status register to force an update. 
*/ + reg::Read(SE->SE_SE_SECURITY); + } + } volatile SecurityEngineRegisters *GetRegisters() { return reinterpret_cast(g_register_address); } - void SetRegisterAddress(uintptr_t address) { - g_register_address = address; + volatile SecurityEngineRegisters *GetRegisters2() { + return reinterpret_cast(g_register2_address); + } + + void SetRegisterAddress(uintptr_t address, uintptr_t address2) { + g_register_address = address; + g_register2_address = address2; } void Initialize() { @@ -39,17 +57,13 @@ namespace ams::se { } void SetSecure(bool secure) { - auto *SE = GetRegisters(); + /* Set security for SE1. */ + SetSecure(GetRegisters(), secure); - /* Set the security software setting. */ - if (secure) { - reg::ReadWrite(SE->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_SOFT_SETTING, SECURE)); - } else { - reg::ReadWrite(SE->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_SOFT_SETTING, NONSECURE)); + /* If SE2 is present, set security for SE2. */ + if (fuse::GetSocType() == fuse::SocType_Mariko) { + SetSecure(GetRegisters2(), secure); } - - /* Read the status register to force an update. */ - reg::Read(SE->SE_SE_SECURITY); } void SetTzramSecure() { @@ -66,6 +80,18 @@ namespace ams::se { reg::ReadWrite(SE->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_PERKEY_SETTING, SECURE)); } + + void SetContextSaveSecure() { + /* Context save lock to trustzone secure is only available on mariko. */ + if (fuse::GetSocType() == fuse::SocType_Mariko) { + auto *SE = GetRegisters(); + auto *SE2 = GetRegisters2(); + + reg::ReadWrite(SE->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_CTX_SAVE_TZ_LOCK, SECURE)); + reg::ReadWrite(SE2->SE_SE_SECURITY, SE_REG_BITS_ENUM(SECURITY_CTX_SAVE_TZ_LOCK, SECURE)); + } + } + void Lockout() { auto *SE = GetRegisters(); diff --git a/libraries/libexosphere/source/se/se_registers.hpp b/libraries/libexosphere/source/se/se_registers.hpp index f66360a88..189767aec 100644 --- a/libraries/libexosphere/source/se/se_registers.hpp +++ b/libraries/libexosphere/source/se/se_registers.hpp @@ -32,7 +32,8 @@ namespace ams::se { u32 SE_OUT_CUR_LL_ID; u32 SE_HASH_RESULT[0x10]; u32 SE_CTX_SAVE_CONFIG; - u32 _0x74[0x63]; + u32 SE_CTX_SAVE_AUTO; + u32 _0x78[0x62]; u32 SE_SHA_CONFIG; u32 SE_SHA_MSG_LENGTH[0x4]; u32 SE_SHA_MSG_LEFT[0x4]; @@ -61,7 +62,9 @@ namespace ams::se { u32 SE_RSA_KEYTABLE_ADDR; u32 SE_RSA_KEYTABLE_DATA; u32 SE_RSA_OUTPUT[0x40]; - u32 _0x528[0xB6]; + u32 _0x528[0x6]; + u32 SE_TZRAM_OPERATION; + u32 _0x544[0xAF]; u32 SE_STATUS; u32 SE_ERR_STATUS; u32 SE_MISC; @@ -100,17 +103,26 @@ namespace ams::se { /* SE_STATUS. 
*/ DEFINE_SE_REG_TWO_BIT_ENUM(STATUS_STATE, 0, IDLE, BUSY, WAIT_OUT, WAIT_IN); + DEFINE_SE_REG_BIT_ENUM(STATUS_MEM_INTERFACE, 2, IDLE, BUSY); /* SE_SECURITY */ - DEFINE_SE_REG_BIT_ENUM(SECURITY_HARD_SETTING, 0, SECURE, NONSECURE); - DEFINE_SE_REG_BIT_ENUM(SECURITY_ENG_DIS, 1, DISABLE, ENABLE); - DEFINE_SE_REG_BIT_ENUM(SECURITY_PERKEY_SETTING, 2, SECURE, NONSECURE); - DEFINE_SE_REG_BIT_ENUM(SECURITY_SOFT_SETTING, 16, SECURE, NONSECURE); + DEFINE_SE_REG_BIT_ENUM(SECURITY_HARD_SETTING, 0, SECURE, NONSECURE); + DEFINE_SE_REG_BIT_ENUM(SECURITY_ENG_DIS, 1, DISABLE, ENABLE); + DEFINE_SE_REG_BIT_ENUM(SECURITY_PERKEY_SETTING, 2, SECURE, NONSECURE); + DEFINE_SE_REG_BIT_ENUM(SECURITY_CTX_SAVE_TZ_LOCK, 4, SECURE, NONSECURE); + DEFINE_SE_REG_BIT_ENUM(SECURITY_CTX_TZ_LOCK_SOFT, 5, SECURE, NONSECURE); + DEFINE_SE_REG_BIT_ENUM(SECURITY_SOFT_SETTING, 16, SECURE, NONSECURE); /* SE_TZRAM_SECURITY */ DEFINE_SE_REG(TZRAM_SETTING, 0, BITSIZEOF(u32)); constexpr inline u32 SE_TZRAM_SETTING_SECURE = 0; + /* SE_TZRAM_OPERATION */ + DEFINE_SE_REG_BIT_ENUM(TZRAM_OPERATION_REQ, 0, IDLE, INITIATE); + DEFINE_SE_REG_BIT_ENUM(TZRAM_OPERATION_MODE, 1, SAVE, RESTORE); + DEFINE_SE_REG_BIT_ENUM(TZRAM_OPERATION_BUSY, 2, NO, YES); + DEFINE_SE_REG(TZRAM_OPERATION_CURR_ADDR, 16, 16); + /* SE_OPERATION */ DEFINE_SE_REG_THREE_BIT_ENUM(OPERATION_OP, 0, ABORT, START, RESTART_OUT, CTX_SAVE, RESTART_IN, RESERVED_5, RESERVED_6, RESERVED_7); @@ -168,6 +180,11 @@ namespace ams::se { DEFINE_SE_REG_BIT_ENUM(CTX_SAVE_CONFIG_STICKY_WORD_QUAD, 24, WORDS_0_3, WORDS_4_7); DEFINE_SE_REG_THREE_BIT_ENUM(CTX_SAVE_CONFIG_SRC, 29, STICKY_BITS, RSA_KEYTABLE, AES_KEYTABLE, PKA1_STICKY_BITS, MEM, RESERVED5, SRK, PKA1_KEYTABLE); + /* SE_CTX_SAVE_AUTO */ + DEFINE_SE_REG_BIT_ENUM(CTX_SAVE_AUTO_ENABLE, 0, NO, YES); + DEFINE_SE_REG_BIT_ENUM(CTX_SAVE_AUTO_LOCK, 8, NO, YES); + DEFINE_SE_REG(CTX_SAVE_AUTO_CURR_CNT, 16, 10); + /* SE_SHA_CONFIG */ DEFINE_SE_REG(SHA_CONFIG_HW_INIT_HASH, 0, 1); diff --git a/libraries/libexosphere/source/se/se_rng.cpp b/libraries/libexosphere/source/se/se_rng.cpp index 4cb099bde..70eea576e 100644 --- a/libraries/libexosphere/source/se/se_rng.cpp +++ b/libraries/libexosphere/source/se/se_rng.cpp @@ -44,31 +44,50 @@ namespace ams::se { reg::Write(SE->SE_RNG_CONFIG, SE_REG_BITS_ENUM(RNG_CONFIG_SRC, ENTROPY), SE_REG_BITS_VALUE(RNG_CONFIG_MODE, mode)); } - } + void InitializeRandom(volatile SecurityEngineRegisters *SE) { + /* Lock the entropy source. */ + reg::Write(SE->SE_RNG_SRC_CONFIG, SE_REG_BITS_ENUM(RNG_SRC_CONFIG_RO_ENTROPY_SOURCE, ENABLE), + SE_REG_BITS_ENUM(RNG_SRC_CONFIG_RO_ENTROPY_SOURCE_LOCK, ENABLE)); - void InitializeRandom() { - /* Get the engine. */ - auto *SE = GetRegisters(); + /* Set the reseed interval to force a reseed every 70000 blocks. */ + SE->SE_RNG_RESEED_INTERVAL = RngReseedInterval; - /* Lock the entropy source. */ - reg::Write(SE->SE_RNG_SRC_CONFIG, SE_REG_BITS_ENUM(RNG_SRC_CONFIG_RO_ENTROPY_SOURCE, ENABLE), - SE_REG_BITS_ENUM(RNG_SRC_CONFIG_RO_ENTROPY_SOURCE_LOCK, ENABLE)); + /* Initialize the DRBG. */ + { + u8 dummy_buf[AesBlockSize]; - /* Set the reseed interval to force a reseed every 70000 blocks. */ - SE->SE_RNG_RESEED_INTERVAL = RngReseedInterval; + /* Configure the engine to force drbg instantiation by writing random to memory. */ + ConfigRng(SE, SE_CONFIG_DST_MEMORY, SE_RNG_CONFIG_MODE_FORCE_INSTANTIATION); - /* Initialize the DRBG. */ - { - u8 dummy_buf[AesBlockSize]; + /* Configure to do a single RNG block operation to trigger DRBG init. 
*/ + SE->SE_CRYPTO_LAST_BLOCK = 0; - /* Configure the engine to force drbg instantiation by writing random to memory. */ - ConfigRng(SE, SE_CONFIG_DST_MEMORY, SE_RNG_CONFIG_MODE_FORCE_INSTANTIATION); + /* Execute the operation. */ + ExecuteOperation(SE, SE_OPERATION_OP_START, dummy_buf, sizeof(dummy_buf), nullptr, 0); + } + } - /* Configure to do a single RNG block operation to trigger DRBG init. */ + void GenerateSrk(volatile SecurityEngineRegisters *SE) { + /* Configure the RNG to output to SRK and force a reseed. */ + ConfigRng(SE, SE_CONFIG_DST_SRK, SE_RNG_CONFIG_MODE_FORCE_RESEED); + + /* Configure a single block operation. */ SE->SE_CRYPTO_LAST_BLOCK = 0; /* Execute the operation. */ - ExecuteOperation(SE, SE_OPERATION_OP_START, dummy_buf, sizeof(dummy_buf), nullptr, 0); + ExecuteOperation(SE, SE_OPERATION_OP_START, nullptr, 0, nullptr, 0); + } + + } + + void InitializeRandom() { + /* Initialize random for SE1. */ + InitializeRandom(GetRegisters()); + + /* If we have SE2, initialize random for SE2. */ + /* NOTE: Nintendo's implementation of this is incorrect. */ + if (fuse::GetSocType() == fuse::SocType_Mariko) { + InitializeRandom(GetRegisters2()); } } @@ -130,17 +149,14 @@ namespace ams::se { } void GenerateSrk() { - /* Get the engine. */ - auto *SE = GetRegisters(); + /* Generate SRK for SE1. */ + GenerateSrk(GetRegisters()); - /* Configure the RNG to output to SRK and force a reseed. */ - ConfigRng(SE, SE_CONFIG_DST_SRK, SE_RNG_CONFIG_MODE_FORCE_RESEED); - - /* Configure a single block operation. */ - SE->SE_CRYPTO_LAST_BLOCK = 0; - - /* Execute the operation. */ - ExecuteOperation(SE, SE_OPERATION_OP_START, nullptr, 0, nullptr, 0); + /* If we have SE2, generate SRK for SE2. */ + /* NOTE: Nintendo's implementation of this is incorrect. */ + if (fuse::GetSocType() == fuse::SocType_Mariko) { + GenerateSrk(GetRegisters2()); + } } } diff --git a/libraries/libexosphere/source/se/se_suspend.cpp b/libraries/libexosphere/source/se/se_suspend.cpp index 883ae3cbc..e064858d3 100644 --- a/libraries/libexosphere/source/se/se_suspend.cpp +++ b/libraries/libexosphere/source/se/se_suspend.cpp @@ -20,6 +20,10 @@ namespace ams::se { namespace { + constexpr inline size_t SE1ContextSaveOperationCount = 133; + constexpr inline size_t SE2ContextSaveOperationCount = 646; + static_assert(((SE1ContextSaveOperationCount - 2) + 1) * se::AesBlockSize == sizeof(se::Context)); + constinit const u8 FixedPattern[AesBlockSize] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F }; @@ -64,6 +68,44 @@ namespace ams::se { ExecuteContextSaveOperation(SE, dst, AesBlockSize, nullptr, 0); } + void ConfigureForAutomaticContextSave(volatile SecurityEngineRegisters *SE) { + /* Configure the engine to do RNG encryption. 
*/ + reg::Write(SE->SE_CONFIG, SE_REG_BITS_ENUM(CONFIG_ENC_MODE, AESMODE_KEY128), + SE_REG_BITS_ENUM(CONFIG_DEC_MODE, AESMODE_KEY128), + SE_REG_BITS_ENUM(CONFIG_ENC_ALG, RNG), + SE_REG_BITS_ENUM(CONFIG_DEC_ALG, NOP), + SE_REG_BITS_ENUM(CONFIG_DST, MEMORY)); + + reg::Write(SE->SE_CRYPTO_CONFIG, SE_REG_BITS_ENUM (CRYPTO_CONFIG_MEMIF, AHB), + SE_REG_BITS_VALUE(CRYPTO_CONFIG_CTR_CNTN, 0), + SE_REG_BITS_ENUM (CRYPTO_CONFIG_KEYSCH_BYPASS, DISABLE), + SE_REG_BITS_ENUM (CRYPTO_CONFIG_CORE_SEL, ENCRYPT), + SE_REG_BITS_ENUM (CRYPTO_CONFIG_IV_SELECT, ORIGINAL), + SE_REG_BITS_ENUM (CRYPTO_CONFIG_VCTRAM_SEL, MEMORY), + SE_REG_BITS_ENUM (CRYPTO_CONFIG_INPUT_SEL, RANDOM), + SE_REG_BITS_ENUM (CRYPTO_CONFIG_XOR_POS, BYPASS), + SE_REG_BITS_ENUM (CRYPTO_CONFIG_HASH_ENB, DISABLE)); + } + + void WaitAutomaticContextSaveDone(volatile SecurityEngineRegisters *SE) { + /* Wait for operation. */ + while (!reg::HasValue(SE->SE_INT_STATUS, SE_REG_BITS_ENUM(INT_STATUS_SE_OP_DONE, ACTIVE))) { /* ... */ } + + /* Wait for the engine to be idle. */ + while (!reg::HasValue(SE->SE_STATUS, SE_REG_BITS_ENUM(STATUS_STATE, IDLE))) { /* ... */ } + + /* Wait for the memory interface to be idle. */ + while (!reg::HasValue(SE->SE_STATUS, SE_REG_BITS_ENUM(STATUS_MEM_INTERFACE, IDLE))) { /* ... */ } + } + + void ValidateErrStatus(volatile SecurityEngineRegisters *SE) { + /* Ensure there is no error status. */ + AMS_ABORT_UNLESS(reg::Read(SE->SE_ERR_STATUS) == 0); + + /* Ensure no error occurred. */ + AMS_ABORT_UNLESS(reg::HasValue(SE->SE_INT_STATUS, SE_REG_BITS_ENUM(INT_STATUS_ERR_STAT, CLEAR))); + } + } bool ValidateStickyBits(const StickyBits &bits) { @@ -237,15 +279,76 @@ namespace ams::se { } } + void ConfigureAutomaticContextSave() { + /* Get registers. */ + auto *SE = GetRegisters(); + auto *SE2 = GetRegisters2(); + + /* Automatic context save is supported only on mariko. */ + if (fuse::GetSocType() == fuse::SocType_Mariko) { + /* Configure SE1 to do automatic context save. */ + reg::Write(SE->SE_CTX_SAVE_AUTO, SE_REG_BITS_ENUM(CTX_SAVE_AUTO_ENABLE, YES), + SE_REG_BITS_ENUM(CTX_SAVE_AUTO_LOCK, YES)); + + /* Configure SE2 to do automatic context save. */ + reg::Write(SE2->SE_CTX_SAVE_AUTO, SE_REG_BITS_ENUM(CTX_SAVE_AUTO_ENABLE, YES), + SE_REG_BITS_ENUM(CTX_SAVE_AUTO_LOCK, YES)); + } + } + + void SaveContextAutomatic() { + /* Get registers. */ + auto *SE = GetRegisters(); + auto *SE2 = GetRegisters2(); + + /* Ensure there's no error status before or after we save context. */ + ValidateErrStatus(); + ON_SCOPE_EXIT { ValidateErrStatus(); }; + + /* Perform atomic context save. */ + { + /* Check that context save has not already been performed. */ + AMS_ABORT_UNLESS(reg::HasValue(SE->SE_CTX_SAVE_AUTO, SE_REG_BITS_VALUE(CTX_SAVE_AUTO_CURR_CNT, 0))); + AMS_ABORT_UNLESS(reg::HasValue(SE2->SE_CTX_SAVE_AUTO, SE_REG_BITS_VALUE(CTX_SAVE_AUTO_CURR_CNT, 0))); + + /* Configure SE1 to do context save. */ + ConfigureForAutomaticContextSave(SE); + ConfigureForAutomaticContextSave(SE2); + + /* Start the context save operation. */ + reg::Write(SE->SE_OPERATION, SE_REG_BITS_ENUM(OPERATION_OP, CTX_SAVE)); + reg::Write(SE2->SE_OPERATION, SE_REG_BITS_ENUM(OPERATION_OP, CTX_SAVE)); + + /* Wait for the context save operation to complete. */ + WaitAutomaticContextSaveDone(SE); + WaitAutomaticContextSaveDone(SE2); + + /* Check that the correct sizes were written. 
*/ + AMS_ABORT_UNLESS(reg::HasValue(SE->SE_CTX_SAVE_AUTO, SE_REG_BITS_VALUE(CTX_SAVE_AUTO_CURR_CNT, SE1ContextSaveOperationCount))); + AMS_ABORT_UNLESS(reg::HasValue(SE2->SE_CTX_SAVE_AUTO, SE_REG_BITS_VALUE(CTX_SAVE_AUTO_CURR_CNT, SE2ContextSaveOperationCount))); + } + } + + void SaveTzramAutomatic() { + /* Get registers. */ + auto *SE = GetRegisters(); + + /* Begin save-to-shadow-tzram operation. */ + reg::Write(SE->SE_TZRAM_OPERATION, SE_REG_BITS_ENUM(TZRAM_OPERATION_MODE, SAVE), + SE_REG_BITS_ENUM(TZRAM_OPERATION_REQ, INITIATE)); + + /* Wait for operation to complete. */ + while (reg::HasValue(SE->SE_TZRAM_OPERATION, SE_REG_BITS_ENUM(TZRAM_OPERATION_BUSY, YES))) { /* ... */ } + } + void ValidateErrStatus() { - /* Get the registers. */ - auto *SE = GetRegisters(); + /* Ensure SE has no error status. */ + ValidateErrStatus(GetRegisters()); - /* Ensure there is no error status. */ - AMS_ABORT_UNLESS(reg::Read(SE->SE_ERR_STATUS) == 0); - - /* Ensure no error occurred. */ - AMS_ABORT_UNLESS(reg::HasValue(SE->SE_INT_STATUS, SE_REG_BITS_ENUM(INT_STATUS_ERR_STAT, CLEAR))); + /* If on mariko, ensure SE2 has no error status. */ + if (fuse::GetSocType() == fuse::SocType_Mariko) { + ValidateErrStatus(GetRegisters2()); + } } } diff --git a/libraries/libexosphere/source/uart/uart_api.cpp b/libraries/libexosphere/source/uart/uart_api.cpp index 02e6d59a3..aaeaf2a1e 100644 --- a/libraries/libexosphere/source/uart/uart_api.cpp +++ b/libraries/libexosphere/source/uart/uart_api.cpp @@ -33,11 +33,11 @@ namespace ams::uart { } void WaitSymbols(int baud, u32 num) { - util::WaitMicroSeconds(util::DivideUp(1'000'000, baud) * num); + util::WaitMicroSeconds(util::DivideUp(num * 1'000'000, baud)); } void WaitCycles(int baud, u32 num) { - util::WaitMicroSeconds(util::DivideUp(1'000'000, 16 * baud) * num); + util::WaitMicroSeconds(util::DivideUp(num * 1'000'000, 16 * baud)); } ALWAYS_INLINE void WaitFifoNotFull(volatile UartRegisters *uart) { @@ -60,26 +60,6 @@ namespace ams::uart { constexpr inline u32 LockBit = (1 << 6); - void Lock(volatile UartRegisters *reg) { - while (true) { - if (reg->mie != 0) { - continue; - } - - reg->irda_csr = LockBit; - - if (reg->mie == 0) { - break; - } - - reg->irda_csr = 0; - } - } - - void Unlock(volatile UartRegisters *reg) { - reg->irda_csr = 0; - } - } void SetRegisterAddress(uintptr_t address) { @@ -97,7 +77,13 @@ namespace ams::uart { constexpr u32 UartClock = 408000000; const u32 divisor = (UartClock + (baud_rate * 16) / 2) / (baud_rate * 16); - /* Disable DLAB and all interrupts. */ + /* Wait for idle state. */ + WaitIdle(uart, UART_VENDOR_STATE_TX_IDLE); + + /* Wait 100 us. */ + util::WaitMicroSeconds(100); + + /* Disable interrupts. */ uart->lcr = uart->lcr & ~UART_LCR_DLAB; uart->ier = 0; uart->mcr = 0; @@ -128,8 +114,8 @@ namespace ams::uart { /* Wait for idle state. */ WaitIdle(uart, UART_VENDOR_STATE_TX_IDLE | UART_VENDOR_STATE_RX_IDLE); - /* Set scratch register to 0. */ - uart->spr = 0; + /* Wait 100 us. */ + util::WaitMicroSeconds(100); } void SendText(Port port, const void *data, size_t size) { @@ -139,10 +125,6 @@ namespace ams::uart { /* Get pointer to data. */ const u8 *p = static_cast(data); - /* Lock the uart registers. */ - Lock(uart); - ON_SCOPE_EXIT { Unlock(uart); }; - /* Send each byte. 
*/ for (size_t i = 0; i < size; ++i) { WaitFifoNotFull(uart); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 0eedb6d84..a09523477 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -23,6 +23,17 @@ namespace ams::kern::arch::arm64::init { + inline void ClearPhysicalMemory(KPhysicalAddress address, size_t size) { + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, sizeof(u64))); + + /* This Physical Address -> void * conversion is valid, because this is init page table code. */ + /* The MMU is necessarily not yet turned on, if we are creating an initial page table. */ + volatile u64 *ptr = reinterpret_cast(GetInteger(address)); + for (size_t i = 0; i < size / sizeof(u64); ++i) { + ptr[i] = 0; + } + } + class KInitialPageTable { public: class IPageAllocator { @@ -61,9 +72,7 @@ namespace ams::kern::arch::arm64::init { } static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) { - /* This Physical Address -> void * conversion is valid, because this is page table code. */ - /* The MMU is necessarily not yet turned on, if we are creating an initial page table. */ - std::memset(reinterpret_cast(GetInteger(address)), 0, PageSize); + ClearPhysicalMemory(address, PageSize); } private: size_t NOINLINE GetBlockCount(KVirtualAddress virt_addr, size_t size, size_t block_size) { @@ -705,7 +714,7 @@ namespace ams::kern::arch::arm64::init { this->state.next_address += PageSize; } - std::memset(reinterpret_cast(allocated), 0, PageSize); + ClearPhysicalMemory(allocated, PageSize); return allocated; } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index 5cd51a910..e3e519020 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -64,6 +64,10 @@ namespace ams::kern::arch::arm64::cpu { EnsureInstructionConsistency(); } + ALWAYS_INLINE void Yield() { + __asm__ __volatile__("yield" ::: "memory"); + } + ALWAYS_INLINE void SwitchProcess(u64 ttbr, u32 proc_id) { SetTtbr0El1(ttbr); ContextIdRegisterAccessor(0).SetProcId(proc_id).Store(); @@ -149,6 +153,25 @@ namespace ams::kern::arch::arm64::cpu { return true; } + ALWAYS_INLINE bool CanAccessAtomic(KProcessAddress addr, bool privileged = false) { + const uintptr_t va = GetInteger(addr); + + if (privileged) { + __asm__ __volatile__("at s1e1w, %[va]" :: [va]"r"(va) : "memory"); + } else { + __asm__ __volatile__("at s1e0w, %[va]" :: [va]"r"(va) : "memory"); + } + InstructionMemoryBarrier(); + + u64 par = GetParEl1(); + + if (par & 0x1) { + return false; + } + + return (par >> (BITSIZEOF(par) - BITSIZEOF(u8))) == 0xFF; + } + /* Synchronization helpers. 
*/ NOINLINE void SynchronizeAllCores(); @@ -173,7 +196,7 @@ namespace ams::kern::arch::arm64::cpu { ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) { const u64 value = (static_cast(asid) << 48); - __asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(static_cast(value) << 48) : "memory"); + __asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory"); EnsureInstructionConsistency(); } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp index 7912bea3f..eb858242b 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp @@ -56,26 +56,69 @@ namespace ams::kern::arch::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(OslarEl1, oslar_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrEl0, tpidr_el0) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrRoEl0, tpidrro_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(EsrEl1, esr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(ElrEl1, elr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(EsrEl1, esr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SpsrEl1, spsr_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr0El1, afsr0_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr1El1, afsr1_el1) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmUserEnrEl0, pmuserenr_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcCntrEl0, pmccntr_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr0El0, pmevcntr0_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr1El0, pmevcntr1_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr2El0, pmevcntr2_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr3El0, pmevcntr3_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr4El0, pmevcntr4_el0) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr5El0, pmevcntr5_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MdscrEl1, mdscr_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpacrEl1, cpacr_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(ContextidrEl1, contextidr_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CntkCtlEl1, cntkctl_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CntpCtlEl0, cntp_ctl_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CntpCvalEl0, cntp_cval_el0) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Daif, daif) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SpEl0, sp_el0) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(IdAa64Dfr0El1, id_aa64dfr0_el1) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcrEl0, pmcr_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmUserEnrEl0, pmuserenr_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcCntrEl0, pmccntr_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmSelrEl0, pmselr_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcCfiltrEl0, pmccfiltr_el0) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmIntEnSetEl1, pmintenset_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmCntEnSetEl0, pmcntenset_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmOvsSetEl0, pmovsset_el0) + + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmIntEnClrEl1, pmintenclr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmCntEnClrEl0, pmcntenclr_el0) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmOvsClrEl0, pmovsclr_el0) + + #define FOR_I_IN_0_TO_30(HANDLER, ...) 
\ + HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \ + HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \ + HANDLER(8, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(11, ## __VA_ARGS__) \ + HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__) \ + HANDLER(16, ## __VA_ARGS__) HANDLER(17, ## __VA_ARGS__) HANDLER(18, ## __VA_ARGS__) HANDLER(19, ## __VA_ARGS__) \ + HANDLER(20, ## __VA_ARGS__) HANDLER(21, ## __VA_ARGS__) HANDLER(22, ## __VA_ARGS__) HANDLER(23, ## __VA_ARGS__) \ + HANDLER(24, ## __VA_ARGS__) HANDLER(25, ## __VA_ARGS__) HANDLER(26, ## __VA_ARGS__) HANDLER(27, ## __VA_ARGS__) \ + HANDLER(28, ## __VA_ARGS__) HANDLER(29, ## __VA_ARGS__) HANDLER(30, ## __VA_ARGS__) + + #define MESOSPHERE_CPU_DEFINE_PMEV_ACCESSORS(ID, ...) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr##ID##El0, pmevcntr##ID##_el0) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevTyper##ID##El0, pmevtyper##ID##_el0) + + FOR_I_IN_0_TO_30(MESOSPHERE_CPU_DEFINE_PMEV_ACCESSORS) + + #undef MESOSPHERE_CPU_DEFINE_PMEV_ACCESSORS + #undef FOR_I_IN_0_TO_30 #define FOR_I_IN_0_TO_15(HANDLER, ...) \ HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \ HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \ HANDLER(8, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(11, ## __VA_ARGS__) \ - HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__) \ + HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__) #define MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS(ID, ...) \ MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWcr##ID##El1, dbgwcr##ID##_el1) \ @@ -158,6 +201,15 @@ namespace ams::kern::arch::arm64::cpu { const size_t shift_value = this->GetBits(16, 6); return size_t(1) << (size_t(64) - shift_value); } + + constexpr ALWAYS_INLINE bool GetEpd0() const { + return this->GetBits(7, 1) != 0; + } + + constexpr ALWAYS_INLINE decltype(auto) SetEpd0(bool set) { + this->SetBit(7, set); + return *this; + } }; MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ArchitecturalFeatureAccessControl) { @@ -189,6 +241,10 @@ namespace ams::kern::arch::arm64::cpu { constexpr ALWAYS_INLINE size_t GetNumBreakpoints() const { return this->GetBits(12, 4); } + + constexpr ALWAYS_INLINE size_t GetNumContextAwareBreakpoints() const { + return this->GetBits(28, 4); + } }; MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MonitorDebugSystemControl) { @@ -387,6 +443,27 @@ namespace ams::kern::arch::arm64::cpu { /* TODO: Other bitfield accessors? */ }; + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(PerformanceMonitorsControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(PerformanceMonitorsControl, pmcr_el0) + public: + constexpr ALWAYS_INLINE u64 GetN() const { + return this->GetBits(11, 5); + } + + constexpr ALWAYS_INLINE decltype(auto) SetEventCounterReset(bool en) { + this->SetBit(1, en); + return *this; + } + + constexpr ALWAYS_INLINE decltype(auto) SetCycleCounterReset(bool en) { + this->SetBit(2, en); + return *this; + } + + /* TODO: Other bitfield accessors? 
*/ + }; + #undef FOR_I_IN_0_TO_15 #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp index 8bf3c8f6f..f06acd990 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp @@ -30,6 +30,38 @@ namespace ams::kern::arch::arm64 { class KDebug final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KDebug, KSynchronizationObject); public: + explicit KDebug() { /* ... */ } + virtual ~KDebug() { /* ... */ } + + static void PostDestroy(uintptr_t arg) { /* ... */ } + public: + virtual Result GetThreadContextImpl(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) override; + virtual Result SetThreadContextImpl(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) override; + private: + Result GetFpuContext(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags); + Result SetFpuContext(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags); + public: + static uintptr_t GetProgramCounter(const KThread &thread); + static void SetPreviousProgramCounter(); + + static Result BreakIfAttached(ams::svc::BreakReason break_reason, uintptr_t address, size_t size); + static Result SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, u64 flags, u64 value); + + static constexpr bool IsBreakInstruction(u32 insn, u32 psr) { + constexpr u32 BreakInstructionAarch64 = 0xE7FFFFFF; + constexpr u32 BreakInstructionAarch32 = 0xE7FFDEFE; + constexpr u32 BreakInstructionThumb32 = 0xB68E; + if ((psr & 0x10) == 0) { + return insn == BreakInstructionAarch64; + } else { + if ((psr & 0x20) == 0) { + return insn == BreakInstructionAarch32; + } else { + return insn == BreakInstructionThumb32; + } + } + } + /* TODO: This is a placeholder definition. */ }; diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp index 9d99e334c..40982b697 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp @@ -22,9 +22,26 @@ namespace ams::kern::arch::arm64 { u64 x[(30 - 0) + 1]; u64 sp; u64 pc; - u64 psr; + u32 psr; + u32 write; u64 tpidr; u64 reserved; + + constexpr void GetSvcThreadContext(ams::svc::LastThreadContext *out) const { + if ((this->psr & 0x10) == 0) { + /* aarch64 thread. */ + out->fp = this->x[29]; + out->sp = this->sp; + out->lr = this->x[30]; + out->pc = this->pc; + } else { + /* aarch32 thread. 
*/ + out->fp = this->x[11]; + out->sp = this->x[13]; + out->lr = this->x[14]; + out->pc = this->pc; + } + } }; static_assert(sizeof(KExceptionContext) == 0x120); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp index ce276c1a7..4051d4c56 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp @@ -112,17 +112,17 @@ namespace ams::kern::arch::arm64 { static constexpr s32 NumPriorityLevels = 4; public: struct LocalState { - u32 local_isenabler[NumLocalInterrupts / 32]; - u32 local_ipriorityr[NumLocalInterrupts / 4]; - u32 local_targetsr[NumLocalInterrupts / 4]; - u32 local_icfgr[NumLocalInterrupts / 16]; + u32 isenabler[NumLocalInterrupts / 32]; + u32 ipriorityr[NumLocalInterrupts / 4]; + u32 itargetsr[NumLocalInterrupts / 4]; + u32 icfgr[NumLocalInterrupts / 16]; }; struct GlobalState { - u32 global_isenabler[NumGlobalInterrupts / 32]; - u32 global_ipriorityr[NumGlobalInterrupts / 4]; - u32 global_targetsr[NumGlobalInterrupts / 4]; - u32 global_icfgr[NumGlobalInterrupts / 16]; + u32 isenabler[NumGlobalInterrupts / 32]; + u32 ipriorityr[NumGlobalInterrupts / 4]; + u32 itargetsr[NumGlobalInterrupts / 4]; + u32 icfgr[NumGlobalInterrupts / 16]; }; enum PriorityLevel : u8 { @@ -142,6 +142,11 @@ namespace ams::kern::arch::arm64 { void Initialize(s32 core_id); void Finalize(s32 core_id); + + void SaveCoreLocal(LocalState *state) const; + void SaveGlobal(GlobalState *state) const; + void RestoreCoreLocal(const LocalState *state) const; + void RestoreGlobal(const GlobalState *state) const; public: u32 GetIrq() const { return this->gicc->iar; @@ -213,12 +218,10 @@ namespace ams::kern::arch::arm64 { this->gicc->eoir = irq; } - bool IsInterruptDefined(s32 irq) { + bool IsInterruptDefined(s32 irq) const { const s32 num_interrupts = std::min(32 + 32 * (this->gicd->typer & 0x1F), static_cast(NumInterrupts)); return (0 <= irq && irq < num_interrupts); } - - /* TODO: Implement more KInterruptController functionality. 
*/ public: static constexpr ALWAYS_INLINE bool IsSoftware(s32 id) { MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp index 2ecf5ebd5..e84b7a228 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp @@ -67,10 +67,21 @@ namespace ams::kern::arch::arm64 { NOINLINE void Initialize(s32 core_id); NOINLINE void Finalize(s32 core_id); - bool IsInterruptDefined(s32 irq) { + NOINLINE void Save(s32 core_id); + NOINLINE void Restore(s32 core_id); + + bool IsInterruptDefined(s32 irq) const { return this->interrupt_controller.IsInterruptDefined(irq); } + bool IsGlobal(s32 irq) const { + return this->interrupt_controller.IsGlobal(irq); + } + + bool IsLocal(s32 irq) const { + return this->interrupt_controller.IsLocal(irq); + } + NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level); NOINLINE Result UnbindHandler(s32 irq, s32 core); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp index 9d8376530..3905bc0ca 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp @@ -182,25 +182,31 @@ namespace ams::kern::arch::arm64 { NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager); Result Finalize(); private: - Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); + Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); + Result MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); + Result MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); + Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll); Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, size_t page_size, PageLinkedList *page_list, bool reuse_ll) { switch (page_size) { case L1BlockSize: + return this->MapL1Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); + case L2ContiguousBlockSize: + entry_template.SetContiguous(true); + [[fallthrough]]; #ifdef ATMOSPHERE_BOARD_NINTENDO_NX case L2TegraSmmuBlockSize: #endif case L2BlockSize: - case L3BlockSize: - break; - case L2ContiguousBlockSize: + return this->MapL2Blocks(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); case L3ContiguousBlockSize: entry_template.SetContiguous(true); - break; + [[fallthrough]]; + case L3BlockSize: + return this->MapL3Blocks(virt_addr, 
phys_addr, num_pages, entry_template, page_list, reuse_ll); MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } - return this->Map(virt_addr, phys_addr, num_pages, entry_template, page_list, reuse_ll); } Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp index c1597bf22..e88d5cd19 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp @@ -106,6 +106,8 @@ namespace ams::kern::arch::arm64 { NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end); L1PageTableEntry *Finalize(); + void Dump(uintptr_t start, size_t size) const; + bool BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const; bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const; diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp index 2df7f508b..c882adc66 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp @@ -44,6 +44,10 @@ namespace ams::kern::arch::arm64 { return this->page_table.SetProcessMemoryPermission(addr, size, perm); } + Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) { + return this->page_table.SetMemoryAttribute(addr, size, mask, attr); + } + Result SetHeapSize(KProcessAddress *out, size_t size) { return this->page_table.SetHeapSize(out, size); } @@ -56,6 +60,34 @@ namespace ams::kern::arch::arm64 { return this->page_table.QueryInfo(out_info, out_page_info, addr); } + Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const { + return this->page_table.QueryPhysicalAddress(out, address); + } + + Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { + return this->page_table.QueryStaticMapping(out, address, size); + } + + Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { + return this->page_table.QueryIoMapping(out, address, size); + } + + Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + return this->page_table.MapMemory(dst_address, src_address, size); + } + + Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + return this->page_table.UnmapMemory(dst_address, src_address, size); + } + + Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + return this->page_table.MapCodeMemory(dst_address, src_address, size); + } + + Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + return this->page_table.UnmapCodeMemory(dst_address, src_address, size); + } + Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { return this->page_table.MapIo(phys_addr, size, perm); } @@ -72,6 +104,10 @@ namespace ams::kern::arch::arm64 { return this->page_table.MapPageGroup(addr, pg, state, perm); } + Result 
UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) { + return this->page_table.UnmapPageGroup(address, pg, state); + } + Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) { return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm); } @@ -80,6 +116,10 @@ namespace ams::kern::arch::arm64 { return this->page_table.MapPages(out_addr, num_pages, state, perm); } + Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) { + return this->page_table.MapPages(address, num_pages, state, perm); + } + Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) { return this->page_table.UnmapPages(addr, num_pages, state); } @@ -88,11 +128,123 @@ namespace ams::kern::arch::arm64 { return this->page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr); } + Result MakeAndOpenPageGroupContiguous(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) { + return this->page_table.MakeAndOpenPageGroupContiguous(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr); + } + + Result InvalidateProcessDataCache(KProcessAddress address, size_t size) { + return this->page_table.InvalidateProcessDataCache(address, size); + } + + Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) { + return this->page_table.ReadDebugMemory(buffer, address, size); + } + + Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) { + return this->page_table.WriteDebugMemory(address, buffer, size); + } + + Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) { + return this->page_table.LockForDeviceAddressSpace(out, address, size, perm, is_aligned); + } + + Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { + return this->page_table.UnlockForDeviceAddressSpace(address, size); + } + + Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) { + return this->page_table.LockForIpcUserBuffer(out, address, size); + } + + Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { + return this->page_table.UnlockForIpcUserBuffer(address, size); + } + + Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) { + return this->page_table.LockForTransferMemory(out, address, size, perm); + } + + Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) { + return this->page_table.UnlockForTransferMemory(address, size, pg); + } + + Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) { + return this->page_table.LockForCodeMemory(out, address, size); + } + + Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) { + return this->page_table.UnlockForCodeMemory(address, size, pg); + } + + Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + return this->page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr); + } + + Result 
CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + return this->page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr); + } + + Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) { + return this->page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr); + } + + Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) { + return this->page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr); + } + + Result CopyMemoryFromHeapToHeap(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + return this->page_table.CopyMemoryFromHeapToHeap(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr); + } + + Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + return this->page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr); + } + + Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) { + return this->page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.page_table, test_perm, dst_state, send); + } + + Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process) { + return this->page_table.CleanupForIpcServer(address, size, dst_state, server_process); + } + + Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) { + return this->page_table.CleanupForIpcClient(address, size, dst_state); + } + + Result MapPhysicalMemory(KProcessAddress address, size_t size) { + return this->page_table.MapPhysicalMemory(address, size); + } + + Result UnmapPhysicalMemory(KProcessAddress address, size_t size) { + return this->page_table.UnmapPhysicalMemory(address, size); + } + + Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + return this->page_table.MapPhysicalMemoryUnsafe(address, size); + } + + Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + return this->page_table.UnmapPhysicalMemoryUnsafe(address, size); + } + 
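/* Note: the accessors added to this header all follow the same delegation pattern visible above.
   KProcessPageTable stays a thin facade over its arch-specific KPageTable member, and each new
   svc-facing operation simply forwards to this->page_table, so the actual page-table logic lives
   in one place. A minimal sketch of that pattern, using illustrative names only (Facade, Impl and
   DoOperation are not identifiers from this patch):

       class Impl {
           public:
               Result DoOperation(KProcessAddress address, size_t size);
       };

       class Facade {
           private:
               Impl impl;
           public:
               Result DoOperation(KProcessAddress address, size_t size) {
                   return this->impl.DoOperation(address, size); // forward unchanged, no extra logic
               }
       };
*/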
+ void DumpTable() const { + return this->page_table.DumpTable(); + } + + void DumpMemoryBlocks() const { + return this->page_table.DumpMemoryBlocks(); + } + bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { return this->page_table.GetPhysicalAddress(out, address); } bool Contains(KProcessAddress addr, size_t size) const { return this->page_table.Contains(addr, size); } + + bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInAliasRegion(addr, size); } + bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return this->page_table.IsInUnsafeAliasRegion(addr, size); } + bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->page_table.CanContain(addr, size, state); } KProcessAddress GetAddressSpaceStart() const { return this->page_table.GetAddressSpaceStart(); } @@ -109,8 +261,11 @@ namespace ams::kern::arch::arm64 { size_t GetKernelMapRegionSize() const { return this->page_table.GetKernelMapRegionSize(); } size_t GetAliasCodeRegionSize() const { return this->page_table.GetAliasCodeRegionSize(); } + size_t GetNormalMemorySize() const { return this->page_table.GetNormalMemorySize(); } + + u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); } + KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) const { - /* TODO: Better way to convert address type? */ return this->page_table.GetHeapPhysicalAddress(address); } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp new file mode 100644 index 000000000..c52f28528 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern::arch::arm64 { + + template + concept SlabHeapNode = requires (T &t) { + { t.next } -> std::convertible_to; + }; + + template requires SlabHeapNode + ALWAYS_INLINE T *AllocateFromSlabAtomic(T **head) { + u32 tmp; + T *node, *next; + + __asm__ __volatile__( + "1:\n" + " ldaxr %[node], [%[head]]\n" + " cbz %[node], 2f\n" + " ldr %[next], [%[node]]\n" + " stlxr %w[tmp], %[next], [%[head]]\n" + " cbnz %w[tmp], 1b\n" + " b 3f\n" + "2:\n" + " clrex\n" + "3:\n" + : [tmp]"=&r"(tmp), [node]"=&r"(node), [next]"=&r"(next), [head]"+&r"(head) + : + : "cc", "memory" + ); + + return node; + } + + template requires SlabHeapNode + ALWAYS_INLINE void FreeToSlabAtomic(T **head, T *node) { + u32 tmp; + T *next; + + __asm__ __volatile__( + "1:\n" + " ldaxr %[next], [%[head]]\n" + " str %[next], [%[node]]\n" + " stlxr %w[tmp], %[node], [%[head]]\n" + " cbnz %w[tmp], 1b\n" + "2:\n" + : [tmp]"=&r"(tmp), [node]"+&r"(node), [next]"=&r"(next), [head]"+&r"(head) + : + : "cc", "memory" + ); + } + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp index 0a1f71872..8e795aa7c 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp @@ -23,9 +23,9 @@ namespace ams::kern::arch::arm64 { class KSupervisorPageTable { private: KPageTable page_table; - u64 ttbr0[cpu::NumCores]; + u64 ttbr0_identity[cpu::NumCores]; public: - constexpr KSupervisorPageTable() : page_table(), ttbr0() { /* ... */ } + constexpr KSupervisorPageTable() : page_table(), ttbr0_identity() { /* ... 
*/ } NOINLINE void Initialize(s32 core_id); @@ -41,8 +41,6 @@ namespace ams::kern::arch::arm64 { cpu::InvalidateEntireTlb(); } - void Finalize(s32 core_id); - Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { return this->page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm); } @@ -62,6 +60,8 @@ namespace ams::kern::arch::arm64 { bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const { return this->page_table.GetPhysicalAddress(out, address); } + + constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return this->ttbr0_identity[core_id]; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp index fe46d2f25..6a123eee5 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp @@ -66,7 +66,21 @@ namespace ams::kern::arch::arm64 { static void FpuContextSwitchHandler(KThread *thread); - /* TODO: More methods (especially FPU management) */ + u32 GetFpcr() const { return this->fpcr; } + u32 GetFpsr() const { return this->fpsr; } + + void SetFpcr(u32 v) { this->fpcr = v; } + void SetFpsr(u32 v) { this->fpsr = v; } + + void CloneFpuStatus(); + + void SetFpuRegisters(const u128 *v, bool is_64_bit); + + const u128 *GetFpuRegisters() const { return this->fpu_registers; } + public: + static void OnThreadTerminating(const KThread *thread); }; + void GetUserContext(ams::svc::ThreadContext *out, const KThread *thread); + } \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp index db67b0acb..38c6ab9f5 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp @@ -39,6 +39,10 @@ namespace ams::kern::arch::arm64 { static bool ClearMemoryAligned64Bit(void *dst, size_t size); static bool ClearMemorySize32Bit(void *dst); + static bool UpdateLockAtomic(u32 *out, u32 *address, u32 if_zero, u32 new_orr_mask); + static bool UpdateIfEqualAtomic(s32 *out, s32 *address, s32 compare_value, s32 new_value); + static bool DecrementIfLessThanAtomic(s32 *out, s32 *address, s32 compare); + static bool StoreDataCache(uintptr_t start, uintptr_t end); static bool FlushDataCache(uintptr_t start, uintptr_t end); static bool InvalidateDataCache(uintptr_t start, uintptr_t end); diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp index 48e5c8e99..98b110944 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp @@ -35,6 +35,14 @@ namespace ams::kern::board::nintendo::nx { u32 hs_attached_value; u32 hs_detached_value; private: + static ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress addr) { + return KMemoryLayout::IsHeapVirtualAddress(nullptr, addr); + } + + static 
ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress addr) { + return KMemoryLayout::IsHeapPhysicalAddress(nullptr, addr); + } + static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) { return KPageTable::GetHeapVirtualAddress(addr); } @@ -53,7 +61,30 @@ namespace ams::kern::board::nintendo::nx { public: constexpr KDevicePageTable() : tables(), table_asids(), attached_device(), attached_value(), detached_value(), hs_attached_value(), hs_detached_value() { /* ... */ } + Result Initialize(u64 space_address, u64 space_size); + void Finalize(); + + Result Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size); + Result Detach(ams::svc::DeviceName device_name); + + Result Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings); + Result Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address); + private: + Result MapDevicePage(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm); + + Result MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm); + void UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force); + + bool IsFree(KDeviceVirtualAddress address, u64 size) const; + Result MakePageGroup(KPageGroup *out, KDeviceVirtualAddress address, u64 size) const; + bool Compare(const KPageGroup &pg, KDeviceVirtualAddress device_address) const; + public: static void Initialize(); + + static void Lock(); + static void Unlock(); + static void Sleep(); + static void Wakeup(); }; } \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.board.nintendo_nx.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.board.nintendo_nx.hpp new file mode 100644 index 000000000..beb1775f9 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.board.nintendo_nx.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + constexpr inline size_t MainMemorySize = 4_GB; + +} diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp index 95372ca5f..080cc8c32 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp @@ -48,7 +48,7 @@ namespace ams::kern::board::nintendo::nx { /* Privileged Access. 
*/ static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value); - static void ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value); + static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value); static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) { u32 v; @@ -67,6 +67,11 @@ namespace ams::kern::board::nintendo::nx { /* User access. */ static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args); + + /* Secure Memory. */ + static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool); + static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool); + static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool); }; } \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp b/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp index 94aaabf66..dfabbaa05 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp @@ -15,6 +15,7 @@ */ #pragma once #include +#include namespace ams::kern { @@ -26,6 +27,12 @@ namespace ams::kern { static NOINLINE void Printf(const char *format, ...) __attribute__((format(printf, 1, 2))); static NOINLINE void VPrintf(const char *format, ::std::va_list vl); + + static NOINLINE Result PrintUserString(ams::kern::svc::KUserPointer user_str, size_t len); + + /* Functionality for preserving across sleep. */ + static NOINLINE void Save(); + static NOINLINE void Restore(); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp index 0457381f6..afe704cfa 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_address_space_info.hpp @@ -22,9 +22,9 @@ namespace ams::kern { struct KAddressSpaceInfo { public: enum Type { - Type_32Bit = 0, - Type_Small64Bit = 1, - Type_Large64Bit = 2, + Type_MapSmall = 0, + Type_MapLarge = 1, + Type_Map39Bit = 2, Type_Heap = 3, Type_Stack = 4, Type_Alias = 5, diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp index 01648d3ec..e0000ad53 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp @@ -127,6 +127,7 @@ namespace ams::kern { u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); do { if (AMS_UNLIKELY(cur_ref_count == 0)) { + MESOSPHERE_AUDIT(cur_ref_count != 0); return false; } MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1); @@ -177,10 +178,12 @@ namespace ams::kern { } }; - template + template requires std::derived_from class KScopedAutoObject { - static_assert(std::is_base_of::value); NON_COPYABLE(KScopedAutoObject); + private: + template + friend class KScopedAutoObject; private: T *obj; private: @@ -202,12 +205,28 @@ namespace ams::kern { this->obj = nullptr; } - constexpr ALWAYS_INLINE KScopedAutoObject(KScopedAutoObject &&rhs) { - this->obj = rhs.obj; - rhs.obj = nullptr; + template requires (std::derived_from || std::derived_from) + constexpr ALWAYS_INLINE KScopedAutoObject(KScopedAutoObject &&rhs) { + if constexpr (std::derived_from) { + /* Upcast. 
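   (Editorial note, inferred from the surrounding code: this converting move constructor covers
   both directions. In this branch T is a base of U, so the raw pointer converts implicitly and
   ownership simply moves across. The else branch below is the downcast case: it attempts
   rhs.obj->DynamicCast<T *>() and, if the cast fails, Closes the source object so the reference
   held by the original KScopedAutoObject is not leaked, leaving the destination null. The
   template arguments stripped by extraction above constrain U so that one of T/U derives from
   the other.)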
*/ + this->obj = rhs.obj; + rhs.obj = nullptr; + } else { + /* Downcast. */ + T *derived = nullptr; + if (rhs.obj != nullptr) { + derived = rhs.obj->template DynamicCast(); + if (derived == nullptr) { + rhs.obj->Close(); + } + } + + this->obj = derived; + rhs.obj = nullptr; + } } - constexpr ALWAYS_INLINE KScopedAutoObject &operator=(KScopedAutoObject &&rhs) { + constexpr ALWAYS_INLINE KScopedAutoObject &operator=(KScopedAutoObject &&rhs) { rhs.Swap(*this); return *this; } @@ -221,6 +240,8 @@ namespace ams::kern { constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return this->obj; } + constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = this->obj; this->obj = nullptr; return ret; } + constexpr ALWAYS_INLINE bool IsNull() const { return this->obj == nullptr; } constexpr ALWAYS_INLINE bool IsNotNull() const { return this->obj != nullptr; } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp index 3d501bdf1..f89839e8c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp @@ -23,7 +23,7 @@ namespace ams::kern { class KAutoObjectWithListContainer { NON_COPYABLE(KAutoObjectWithListContainer); NON_MOVEABLE(KAutoObjectWithListContainer); - private: + public: using ListType = util::IntrusiveRedBlackTreeMemberTraits<&KAutoObjectWithList::list_node>::TreeType; public: class ListAccessor : public KScopedLightLock { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp index 72d48666a..9c590e3db 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp @@ -221,6 +221,12 @@ namespace ams::kern { data[id / BitsPerWord] &= ~(1ul << (id % BitsPerWord)); } + static constexpr ALWAYS_INLINE bool GetSvcAllowedImpl(u8 *data, u32 id) { + constexpr size_t BitsPerWord = BITSIZEOF(*data); + MESOSPHERE_ASSERT(id < svc::SvcId_Count); + return (data[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0; + } + bool SetSvcAllowed(u32 id) { if (id < BITSIZEOF(this->svc_access_flags)) { SetSvcAllowedImpl(this->svc_access_flags, id); @@ -230,10 +236,10 @@ namespace ams::kern { } } - bool SetInterruptAllowed(u32 id) { + bool SetInterruptPermitted(u32 id) { constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]); if (id < BITSIZEOF(this->irq_access_flags)) { - this->irq_access_flags[id / BitsPerWord] = (1ul << (id % BitsPerWord)); + this->irq_access_flags[id / BitsPerWord] |= (1ul << (id % BitsPerWord)); return true; } else { return false; @@ -253,10 +259,12 @@ namespace ams::kern { Result SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table); Result SetCapabilities(const u32 *caps, s32 num_caps, KProcessPageTable *page_table); + Result SetCapabilities(svc::KUserPointer user_caps, s32 num_caps, KProcessPageTable *page_table); public: constexpr KCapabilities() = default; Result Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table); + Result Initialize(svc::KUserPointer user_caps, s32 num_caps, KProcessPageTable *page_table); constexpr u64 GetCoreMask() const { return this->core_mask; } constexpr u64 GetPriorityMask() const { return this->priority_mask; } @@ -264,17 +272,88 @@ namespace ams::kern { ALWAYS_INLINE void 
CopySvcPermissionsTo(KThread::StackParameters &sp) const { static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + /* Copy permissions. */ std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags)); /* Clear specific SVCs based on our state. */ ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState); - if (sp.is_preemption_state_pinned) { + if (sp.is_pinned) { ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo); } } - /* TODO: Member functions. */ + ALWAYS_INLINE void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) const { + static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + /* Clear all permissions. */ + std::memset(sp.svc_permission, 0, sizeof(this->svc_access_flags)); + + /* Set specific SVCs based on our state. */ + SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState); + if (GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException)) { + SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); + SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo); + } + } + + ALWAYS_INLINE void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) const { + static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + /* Get whether we have access to return from exception. */ + const bool return_from_exception = GetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); + + /* Copy permissions. */ + std::memcpy(sp.svc_permission, this->svc_access_flags, sizeof(this->svc_access_flags)); + + /* Clear/Set specific SVCs based on our state. */ + ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); + ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_SynchronizePreemptionState); + if (return_from_exception) { + SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); + } + } + + ALWAYS_INLINE void CopyEnterExceptionSvcPermissionsTo(KThread::StackParameters &sp) { + static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + + /* Set ReturnFromException if allowed. */ + if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_ReturnFromException)) { + SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); + } + + /* Set GetInfo if allowed. */ + if (GetSvcAllowedImpl(this->svc_access_flags, svc::SvcId_GetInfo)) { + SetSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo); + } + } + + ALWAYS_INLINE void CopyLeaveExceptionSvcPermissionsTo(KThread::StackParameters &sp) { + static_assert(sizeof(svc_access_flags) == sizeof(sp.svc_permission)); + + /* Clear ReturnFromException. */ + ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_ReturnFromException); + + /* If pinned, clear GetInfo. 
*/ + if (sp.is_pinned) { + ClearSvcAllowedImpl(sp.svc_permission, svc::SvcId_GetInfo); + } + } + + constexpr bool IsPermittedInterrupt(u32 id) const { + constexpr size_t BitsPerWord = BITSIZEOF(this->irq_access_flags[0]); + if (id < BITSIZEOF(this->irq_access_flags)) { + return (this->irq_access_flags[id / BitsPerWord] & (1ul << (id % BitsPerWord))) != 0; + } else { + return false; + } + } + + constexpr bool IsPermittedDebug() const { + return this->debug_capabilities.Get(); + } + + constexpr bool CanForceDebug() const { + return this->debug_capabilities.Get(); + } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp index 29768d908..5000f7626 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp @@ -20,6 +20,10 @@ namespace ams::kern { class KPort; + class KSession; + class KClientSession; + class KLightSession; + class KLightClientSession; class KClientPort final : public KSynchronizationObject { MESOSPHERE_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject); @@ -33,6 +37,8 @@ namespace ams::kern { virtual ~KClientPort() { /* ... */ } void Initialize(KPort *parent, s32 max_sessions); + void OnSessionFinalized(); + void OnServerClosed(); constexpr const KPort *GetParent() const { return this->parent; } @@ -42,7 +48,8 @@ namespace ams::kern { virtual void Destroy() override; virtual bool IsSignaled() const override; - /* TODO: More of KClientPort. */ + Result CreateSession(KClientSession **out); + Result CreateLightSession(KLightClientSession **out); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp index c6eef389d..953e247e1 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_client_session.hpp @@ -34,11 +34,15 @@ namespace ams::kern { this->parent = parent; } + virtual void Destroy() override; static void PostDestroy(uintptr_t arg) { /* ... */ } - constexpr const KSession *GetParent() const { return this->parent; } + constexpr KSession *GetParent() const { return this->parent; } - /* TODO: More of KClientSession. */ + Result SendSyncRequest(uintptr_t address, size_t size); + Result SendAsyncRequest(KWritableEvent *event, uintptr_t address, size_t size); + + void OnServerClosed(); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp index 5f3f30afa..29467b701 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_code_memory.hpp @@ -22,8 +22,35 @@ namespace ams::kern { class KCodeMemory final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KCodeMemory, KAutoObject); + private: + TYPED_STORAGE(KPageGroup) page_group; + KProcess *owner; + KProcessAddress address; + KLightLock lock; + bool is_initialized; + bool is_owner_mapped; + bool is_mapped; public: - /* TODO: This is a placeholder definition. */ + explicit KCodeMemory() : owner(nullptr), address(Null), is_initialized(false), is_owner_mapped(false), is_mapped(false) { + /* ... */ + } + + virtual ~KCodeMemory() { /* ... 
*/ } + + Result Initialize(KProcessAddress address, size_t size); + virtual void Finalize() override; + + Result Map(KProcessAddress address, size_t size); + Result Unmap(KProcessAddress address, size_t size); + Result MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm); + Result UnmapFromOwner(KProcessAddress address, size_t size); + + virtual bool IsInitialized() const override { return this->is_initialized; } + static void PostDestroy(uintptr_t arg) { /* ... */ } + + KProcess *GetOwner() const { return this->owner; } + KProcessAddress GetSourceAddress() { return this->address; } + size_t GetSize() const { return this->is_initialized ? GetReference(this->page_group).GetNumPages() * PageSize : 0; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp index 85462c611..57aca7def 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_condition_variable.hpp @@ -20,26 +20,11 @@ namespace ams::kern { - struct KConditionVariableComparator { - static constexpr ALWAYS_INLINE int Compare(const KThread &lhs, const KThread &rhs) { - const uintptr_t l_key = lhs.GetConditionVariableKey(); - const uintptr_t r_key = rhs.GetConditionVariableKey(); - - if (l_key < r_key) { - /* Sort first by key */ - return -1; - } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) { - /* And then by priority. */ - return -1; - } else { - return 1; - } - } - }; + extern KThread g_cv_arbiter_compare_thread; class KConditionVariable { public: - using ThreadTree = util::IntrusiveRedBlackTreeMemberTraits<&KThread::condvar_arbiter_tree_node>::TreeType; + using ThreadTree = typename KThread::ConditionVariableThreadTreeType; private: ThreadTree tree; public: @@ -52,18 +37,20 @@ namespace ams::kern { /* Condition variable. 
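   Waiting threads are kept in a ConditionVariableThreadTree ordered by condition-variable key
   first and by thread priority second (lower numeric value means higher priority on Horizon),
   so Signal(cv_key, count) can locate the waiters registered on one key and wake them in
   priority order. This is the same ordering the removed KConditionVariableComparator expressed;
   as a plain standalone comparator it would look roughly like the following sketch (hypothetical
   types, not the kernel's API):

       struct Waiter { unsigned long key; int priority; };

       // Strict weak ordering: group by key, then prefer lower priority values (higher priority).
       bool CvWaiterLess(const Waiter &lhs, const Waiter &rhs) {
           if (lhs.key != rhs.key) {
               return lhs.key < rhs.key;
           }
           return lhs.priority < rhs.priority;
       }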
*/ void Signal(uintptr_t cv_key, s32 count); Result Wait(KProcessAddress addr, uintptr_t key, u32 value, s64 timeout); - - ALWAYS_INLINE void BeforeUpdatePriority(KThread *thread) { - MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); - - this->tree.erase(this->tree.iterator_to(*thread)); - } - - ALWAYS_INLINE void AfterUpdatePriority(KThread *thread) { - MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); - - this->tree.insert(*thread); - } + private: + KThread *SignalImpl(KThread *thread); }; + ALWAYS_INLINE void BeforeUpdatePriority(KConditionVariable::ThreadTree *tree, KThread *thread) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + tree->erase(tree->iterator_to(*thread)); + } + + ALWAYS_INLINE void AfterUpdatePriority(KConditionVariable::ThreadTree *tree, KThread *thread) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + tree->insert(*thread); + } + } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp index 442f04fed..9702fd465 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp @@ -30,6 +30,7 @@ namespace ams::kern { KInterruptTaskManager *interrupt_task_manager; s32 core_id; void *exception_stack_top; + ams::svc::ThreadLocalRegion *tlr; }; static_assert(std::is_standard_layout::value && std::is_trivially_destructible::value); static_assert(sizeof(KCurrentContext) <= cpu::DataCacheLineSize); @@ -80,6 +81,10 @@ namespace ams::kern { return impl::GetCurrentContext().core_id; } + ALWAYS_INLINE ams::svc::ThreadLocalRegion *GetCurrentThreadLocalRegion() { + return impl::GetCurrentContext().tlr; + } + ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) { impl::GetCurrentContext().current_thread = new_thread; } @@ -88,4 +93,8 @@ namespace ams::kern { impl::GetCurrentContext().current_process = new_process; } + ALWAYS_INLINE void SetCurrentThreadLocalRegion(void *address) { + impl::GetCurrentContext().tlr = static_cast(address); + } + } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp index b1515b379..dc3951236 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp @@ -16,12 +16,68 @@ #pragma once #include #include +#include +#include +#include namespace ams::kern { class KDebugBase : public KSynchronizationObject { + protected: + using DebugEventList = util::IntrusiveListBaseTraits::ListType; + private: + DebugEventList event_info_list; + u32 continue_flags; + KProcess *process; + KLightLock lock; + KProcess::State old_process_state; public: - /* TODO: This is a placeholder definition. */ + explicit KDebugBase() { /* ... */ } + virtual ~KDebugBase() { /* ... 
*/ } + protected: + bool Is64Bit() const; + public: + void Initialize(); + + Result Attach(KProcess *process); + Result BreakProcess(); + Result TerminateProcess(); + + Result ContinueDebug(const u32 flags, const u64 *thread_ids, size_t num_thread_ids); + + Result QueryMemoryInfo(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, KProcessAddress address); + Result ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size); + Result WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size); + + Result GetThreadContext(ams::svc::ThreadContext *out, u64 thread_id, u32 context_flags); + Result SetThreadContext(const ams::svc::ThreadContext &ctx, u64 thread_id, u32 context_flags); + + virtual Result GetThreadContextImpl(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) = 0; + virtual Result SetThreadContextImpl(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) = 0; + + Result GetRunningThreadInfo(ams::svc::LastThreadContext *out_context, u64 *out_thread_id); + + Result GetDebugEventInfo(ams::svc::lp64::DebugEventInfo *out); + Result GetDebugEventInfo(ams::svc::ilp32::DebugEventInfo *out); + + KScopedAutoObject GetProcess(); + private: + void PushDebugEvent(ams::svc::DebugEvent event, uintptr_t param0 = 0, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0); + void EnqueueDebugEventInfo(KEventInfo *info); + + template requires (std::same_as || std::same_as) + Result GetDebugEventInfoImpl(T *out); + public: + virtual void OnFinalizeSynchronizationObject() override; + virtual bool IsSignaled() const override; + private: + static Result ProcessDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4); + public: + static Result OnDebugEvent(ams::svc::DebugEvent event, uintptr_t param0 = 0, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0); + static Result OnExitProcess(KProcess *process); + static Result OnTerminateProcess(KProcess *process); + static Result OnExitThread(KThread *thread); + static KEventInfo *CreateDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4, u64 thread_id); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp index 74fe90922..03b302f9a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_device_address_space.hpp @@ -23,10 +23,39 @@ namespace ams::kern { class KDeviceAddressSpace final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KDeviceAddressSpace, KAutoObject); + private: + KLightLock lock; + KDevicePageTable table; + u64 space_address; + u64 space_size; + bool is_initialized; + public: + constexpr KDeviceAddressSpace() : lock(), table(), space_address(), space_size(), is_initialized() { /* ... */ } + virtual ~KDeviceAddressSpace() { /* ... */ } + + Result Initialize(u64 address, u64 size); + virtual void Finalize() override; + + virtual bool IsInitialized() const override { return this->is_initialized; } + static void PostDestroy(uintptr_t arg) { /* ... 
*/ } + + Result Attach(ams::svc::DeviceName device_name); + Result Detach(ams::svc::DeviceName device_name); + + Result Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) { + return this->Map(out_mapped_size, page_table, process_address, size, device_address, device_perm, false, refresh_mappings); + } + + Result MapAligned(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm) { + size_t dummy; + return this->Map(std::addressof(dummy), page_table, process_address, size, device_address, device_perm, true, false); + } + + Result Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address); + private: + Result Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings); public: static void Initialize(); - - /* TODO: This is a placeholder definition. */ }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp index f1db211e1..2689bcf61 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_event.hpp @@ -18,13 +18,38 @@ #include #include #include +#include namespace ams::kern { class KEvent final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KEvent, KAutoObject); + private: + KReadableEvent readable_event; + KWritableEvent writable_event; + KProcess *owner; + bool initialized; public: - /* TODO: This is a placeholder definition. */ + constexpr KEvent() + : readable_event(), writable_event(), owner(), initialized() + { + /* ... */ + } + + virtual ~KEvent() { /* ... */ } + + void Initialize(); + virtual void Finalize() override; + + virtual bool IsInitialized() const override { return this->initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(this->owner); } + + static void PostDestroy(uintptr_t arg); + + virtual KProcess *GetOwner() const override { return this->owner; } + + KReadableEvent &GetReadableEvent() { return this->readable_event; } + KWritableEvent &GetWritableEvent() { return this->writable_event; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp index c458da021..3c1e846a2 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp @@ -21,7 +21,50 @@ namespace ams::kern { class KEventInfo : public KSlabAllocated, public util::IntrusiveListBaseNode { public: - /* TODO: This is a placeholder definition. 
*/ + struct InfoCreateThread { + u32 thread_id; + uintptr_t tls_address; + uintptr_t entrypoint; + }; + + struct InfoExitProcess { + ams::svc::ProcessExitReason reason; + }; + + struct InfoExitThread { + ams::svc::ThreadExitReason reason; + }; + + struct InfoException { + ams::svc::DebugException exception_type; + s32 exception_data_count; + uintptr_t exception_address; + uintptr_t exception_data[4]; + }; + + struct InfoSystemCall { + s64 tick; + s32 id; + }; + public: + ams::svc::DebugEvent event; + u32 thread_id; + u32 flags; + bool is_attached; + bool continue_flag; + bool ignore_continue; + bool close_once; + union { + InfoCreateThread create_thread; + InfoExitProcess exit_process; + InfoExitThread exit_thread; + InfoException exception; + InfoSystemCall system_call; + } info; + KThread *debug_thread; + public: + explicit KEventInfo() : is_attached(), continue_flag(), ignore_continue() { /* ... */ } + ~KEventInfo() { /* ... */ } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp index d6ba17cdc..838f58028 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp @@ -126,20 +126,7 @@ namespace ams::kern { NOINLINE bool Remove(ams::svc::Handle handle); template - ALWAYS_INLINE KScopedAutoObject GetObject(ams::svc::Handle handle) const { - MESOSPHERE_ASSERT_THIS(); - - /* Handle pseudo-handles. */ - if constexpr (std::is_base_of::value) { - if (handle == ams::svc::PseudoHandle::CurrentProcess) { - return GetCurrentProcessPointer(); - } - } else if constexpr (std::is_base_of::value) { - if (handle == ams::svc::PseudoHandle::CurrentThread) { - return GetCurrentThreadPointer(); - } - } - + ALWAYS_INLINE KScopedAutoObject GetObjectWithoutPseudoHandle(ams::svc::Handle handle) const { /* Lock and look up in table. */ KScopedDisableDispatch dd; KScopedSpinLock lk(this->lock); @@ -147,25 +134,33 @@ namespace ams::kern { if constexpr (std::is_same::value) { return this->GetObjectImpl(handle); } else { - return this->GetObjectImpl(handle)->DynamicCast(); + if (auto *obj = this->GetObjectImpl(handle); obj != nullptr) { + return obj->DynamicCast(); + } else { + return nullptr; + } } } template - ALWAYS_INLINE KScopedAutoObject GetObjectForIpc(ams::svc::Handle handle) const { - static_assert(!std::is_base_of::value); + ALWAYS_INLINE KScopedAutoObject GetObject(ams::svc::Handle handle) const { + MESOSPHERE_ASSERT_THIS(); /* Handle pseudo-handles. */ - if constexpr (std::is_base_of::value) { + if constexpr (std::derived_from) { if (handle == ams::svc::PseudoHandle::CurrentProcess) { return GetCurrentProcessPointer(); } - } else if constexpr (std::is_base_of::value) { + } else if constexpr (std::derived_from) { if (handle == ams::svc::PseudoHandle::CurrentThread) { return GetCurrentThreadPointer(); } } + return this->template GetObjectWithoutPseudoHandle(handle); + } + + ALWAYS_INLINE KScopedAutoObject GetObjectForIpcWithoutPseudoHandle(ams::svc::Handle handle) const { /* Lock and look up in table. */ KScopedDisableDispatch dd; KScopedSpinLock lk(this->lock); @@ -174,11 +169,20 @@ namespace ams::kern { if (obj->DynamicCast() != nullptr) { return nullptr; } - if constexpr (std::is_same::value) { - return obj; - } else { - return obj->DynamicCast(); + + return obj; + } + + ALWAYS_INLINE KScopedAutoObject GetObjectForIpc(ams::svc::Handle handle, KThread *cur_thread) const { + /* Handle pseudo-handles. 
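   Pseudo-handles are reserved handle values that always refer to the caller's own objects and
   never occupy a slot in the handle table: CurrentThread is 0xFFFF8000 and CurrentProcess is
   0xFFFF8001. That is why this path can resolve them straight from cur_thread (and its owner
   process) without taking the table lock, and only falls back to
   GetObjectForIpcWithoutPseudoHandle, defined just above, for the normal locked table lookup.
   On the caller side these values can be passed wherever a real handle is accepted (libnx, for
   example, exposes them as CUR_THREAD_HANDLE / CUR_PROCESS_HANDLE) to refer to the calling
   thread or process without ever creating a handle.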
*/ + if (handle == ams::svc::PseudoHandle::CurrentProcess) { + return static_cast(static_cast(cur_thread->GetOwnerProcess())); } + if (handle == ams::svc::PseudoHandle::CurrentThread) { + return static_cast(cur_thread); + } + + return GetObjectForIpcWithoutPseudoHandle(handle); } ALWAYS_INLINE KScopedAutoObject GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const { @@ -203,6 +207,49 @@ namespace ams::kern { static_assert(std::is_base_of::value); return this->Register(handle, obj, obj->GetTypeObj().GetClassToken()); } + + template + ALWAYS_INLINE bool GetMultipleObjects(T **out, const ams::svc::Handle *handles, size_t num_handles) const { + /* Try to convert and open all the handles. */ + size_t num_opened; + { + /* Lock the table. */ + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + for (num_opened = 0; num_opened < num_handles; num_opened++) { + /* Get the current handle. */ + const auto cur_handle = handles[num_opened]; + + /* Get the object for the current handle. */ + KAutoObject *cur_object = this->GetObjectImpl(cur_handle); + if (AMS_UNLIKELY(cur_object == nullptr)) { + break; + } + + /* Cast the current object to the desired type. */ + T *cur_t = cur_object->DynamicCast(); + if (AMS_UNLIKELY(cur_t == nullptr)) { + break; + } + + /* Open a reference to the current object. */ + cur_t->Open(); + out[num_opened] = cur_t; + } + } + + /* If we converted every object, succeed. */ + if (AMS_LIKELY(num_opened == num_handles)) { + return true; + } + + /* If we didn't convert entry object, close the ones we opened. */ + for (size_t i = 0; i < num_opened; i++) { + out[i]->Close(); + } + + return false; + } private: NOINLINE Result Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type); NOINLINE void Register(ams::svc::Handle handle, KAutoObject *obj, u16 type); @@ -310,49 +357,6 @@ namespace ams::kern { *out_handle = EncodeHandle(index, entry->GetLinearId()); return entry->GetObject(); } - - template - ALWAYS_INLINE bool GetMultipleObjects(T **out, const ams::svc::Handle *handles, size_t num_handles) const { - /* Try to convert and open all the handles. */ - size_t num_opened; - { - /* Lock the table. */ - KScopedDisableDispatch dd; - KScopedSpinLock lk(this->lock); - for (num_opened = 0; num_opened < num_handles; num_opened++) { - /* Get the current handle. */ - const auto cur_handle = handles[num_opened]; - - /* Get the object for the current handle. */ - KAutoObject *cur_object = this->GetObjectImpl(cur_handle); - if (AMS_UNLIKELY(cur_object == nullptr)) { - break; - } - - /* Cast the current object to the desired type. */ - T *cur_t = cur_object->DynamicCast(); - if (AMS_UNLIKELY(cur_t == nullptr)) { - break; - } - - /* Open a reference to the current object. */ - cur_t->Open(); - out[num_opened] = cur_t; - } - } - - /* If we converted every object, succeed. */ - if (AMS_LIKELY(num_opened == num_handles)) { - return true; - } - - /* If we didn't convert entry object, close the ones we opened. 
*/ - for (size_t i = 0; i < num_opened; i++) { - out[i]->Close(); - } - - return false; - } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp index 15f102087..326994b10 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_event.hpp @@ -26,13 +26,40 @@ namespace ams::kern { class KInterruptEvent final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KInterruptEvent, KReadableEvent); + private: + KInterruptEventTask *task; + s32 interrupt_id; + bool is_initialized; public: - /* TODO: This is a placeholder definition. */ + constexpr KInterruptEvent() : task(nullptr), interrupt_id(-1), is_initialized(false) { /* ... */ } + virtual ~KInterruptEvent() { /* ... */ } + + Result Initialize(int32_t interrupt_name, ams::svc::InterruptType type); + virtual void Finalize() override; + + virtual Result Reset() override; + + virtual bool IsInitialized() const override { return this->is_initialized; } + + static void PostDestroy(uintptr_t arg) { /* ... */ } + + constexpr s32 GetInterruptId() const { return this->interrupt_id; } }; class KInterruptEventTask : public KSlabAllocated, public KInterruptTask { + private: + KInterruptEvent *event; + s32 interrupt_id; public: - /* TODO: This is a placeholder definition. */ + constexpr KInterruptEventTask() : event(nullptr), interrupt_id(-1) { /* ... */ } + ~KInterruptEventTask() { /* ... */ } + + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override; + virtual void DoTask() override; + + void Unregister(); + public: + static Result Register(KInterruptEventTask **out, s32 interrupt_id, bool level, KInterruptEvent *event); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp index eaeec87d7..d3c4d5113 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp @@ -49,8 +49,6 @@ namespace ams::kern { NOINLINE void Initialize(); void EnqueueTask(KInterruptTask *task); - - /* TODO: Actually implement KInterruptTaskManager. This is a placeholder. */ }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp index 474ae35e5..f8c429bce 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_client_session.hpp @@ -34,11 +34,14 @@ namespace ams::kern { this->parent = parent; } + virtual void Destroy() override; static void PostDestroy(uintptr_t arg) { /* ... */ } constexpr const KLightSession *GetParent() const { return this->parent; } - /* TODO: More of KLightClientSession. 
*/ + Result SendSyncRequest(u32 *data); + + void OnServerClosed(); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp index 0c0f601a7..0949a5902 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp @@ -31,15 +31,18 @@ namespace ams::kern { MESOSPHERE_ASSERT_THIS(); const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); + const uintptr_t cur_thread_tag = (cur_thread | 1); while (true) { uintptr_t old_tag = this->tag.load(std::memory_order_relaxed); while (!this->tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { - /* ... */ + if ((old_tag | 1) == cur_thread_tag) { + return; + } } - if ((old_tag == 0) || ((old_tag | 1) == (cur_thread | 1))) { + if ((old_tag == 0) || ((old_tag | 1) == cur_thread_tag)) { break; } @@ -52,9 +55,11 @@ namespace ams::kern { const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); uintptr_t expected = cur_thread; - if (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release)) { - this->UnlockSlowPath(cur_thread); - } + do { + if (expected != cur_thread) { + return this->UnlockSlowPath(cur_thread); + } + } while (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release)); } void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp index 7fcb23a13..1487a714d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_server_session.hpp @@ -35,13 +35,22 @@ namespace ams::kern { constexpr KLightServerSession() : parent(), request_queue(), server_queue(), current_request(), server_thread() { /* ... */ } virtual ~KLightServerSession() { /* ... */ } - void Initialize(KLightSession *parent); + void Initialize(KLightSession *parent) { + /* Set member variables. */ + this->parent = parent; + } + virtual void Destroy() override; static void PostDestroy(uintptr_t arg) { /* ... */ } constexpr const KLightSession *GetParent() const { return this->parent; } - /* TODO: More of KLightServerSession. */ + Result OnRequest(KThread *request_thread); + Result ReplyAndReceive(u32 *data); + + void OnClientClosed(); + private: + void CleanupRequests(); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp index b4257a150..d37f69b48 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp @@ -34,6 +34,9 @@ namespace ams::kern { ClientClosed = 2, ServerClosed = 3, }; + public: + static constexpr size_t DataSize = sizeof(u32) * 7; + static constexpr u32 ReplyFlag = (1u << (BITSIZEOF(u32) - 1)); private: KLightServerSession server; KLightClientSession client; @@ -51,12 +54,21 @@ namespace ams::kern { virtual ~KLightSession() { /* ... 
*/ } + void Initialize(KClientPort *client_port, uintptr_t name); + virtual void Finalize() override; + virtual bool IsInitialized() const override { return this->initialized; } virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(this->process); } static void PostDestroy(uintptr_t arg); - /* TODO: This is a placeholder definition. */ + void OnServerClosed(); + void OnClientClosed(); + + bool IsServerClosed() const { return this->state != State::Normal; } + bool IsClientClosed() const { return this->state != State::Normal; } + + Result OnRequest(KThread *request_thread) { return this->server.OnRequest(request_thread); } KLightClientSession &GetClientSession() { return this->client; } KLightServerSession &GetServerSession() { return this->server; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp index 4cb1d5a7b..b5f4dbd3f 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp @@ -155,6 +155,8 @@ namespace ams::kern { KMemoryPermission_UserReadExecute = KMemoryPermission_UserRead | KMemoryPermission_UserExecute, KMemoryPermission_UserMask = ams::svc::MemoryPermission_Read | ams::svc::MemoryPermission_Write | ams::svc::MemoryPermission_Execute, + + KMemoryPermission_IpcLockChangeMask = KMemoryPermission_NotMapped | KMemoryPermission_UserReadWrite, }; constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) { @@ -170,6 +172,10 @@ namespace ams::kern { KMemoryAttribute_IpcLocked = ams::svc::MemoryAttribute_IpcLocked, KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared, KMemoryAttribute_Uncached = ams::svc::MemoryAttribute_Uncached, + + KMemoryAttribute_AnyLocked = 0x80, + + KMemoryAttribute_SetMask = KMemoryAttribute_Uncached, }; struct KMemoryInfo { @@ -213,6 +219,26 @@ namespace ams::kern { constexpr uintptr_t GetLastAddress() const { return this->GetEndAddress() - 1; } + + constexpr u16 GetIpcLockCount() const { + return this->ipc_lock_count; + } + + constexpr KMemoryState GetState() const { + return this->state; + } + + constexpr KMemoryPermission GetPermission() const { + return this->perm; + } + + constexpr KMemoryPermission GetOriginalPermission() const { + return this->original_perm; + } + + constexpr KMemoryAttribute GetAttribute() const { + return this->attribute; + } }; class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode { @@ -256,6 +282,22 @@ namespace ams::kern { return this->GetEndAddress() - 1; } + constexpr u16 GetIpcLockCount() const { + return this->ipc_lock_count; + } + + constexpr KMemoryPermission GetPermission() const { + return this->perm; + } + + constexpr KMemoryPermission GetOriginalPermission() const { + return this->original_perm; + } + + constexpr KMemoryAttribute GetAttribute() const { + return this->attribute; + } + constexpr KMemoryInfo GetMemoryInfo() const { return { .address = GetInteger(this->GetAddress()), @@ -352,6 +394,66 @@ namespace ams::kern { this->address = addr; this->num_pages -= block->num_pages; } + + constexpr void ShareToDevice(KMemoryPermission new_perm) { + /* We must either be shared or have a zero lock count. */ + MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared || this->device_use_count == 0); + + /* Share. 
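   (Editorial note: device sharing is reference counted. The increment below bumps
   device_use_count and ensures KMemoryAttribute_DeviceShared is set; UnshareToDevice decrements
   the count and only clears the attribute once it returns to zero. For example, with two
   overlapping device mappings of the same block: 0 -> 1 sets DeviceShared, 1 -> 2 leaves it set,
   the first unmap takes 2 -> 1 and keeps the attribute, and the second takes 1 -> 0 and finally
   clears it.)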
*/
+                const u16 new_count = ++this->device_use_count;
+                MESOSPHERE_ABORT_UNLESS(new_count > 0);
+
+                this->attribute = static_cast<KMemoryAttribute>(this->attribute | KMemoryAttribute_DeviceShared);
+            }
+
+            constexpr void UnshareToDevice(KMemoryPermission new_perm) {
+                /* We must be shared. */
+                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared);
+
+                /* Unshare. */
+                const u16 old_count = this->device_use_count--;
+                MESOSPHERE_ABORT_UNLESS(old_count > 0);
+
+                if (old_count == 1) {
+                    this->attribute = static_cast<KMemoryAttribute>(this->attribute & ~KMemoryAttribute_DeviceShared);
+                }
+            }
+
+            constexpr void LockForIpc(KMemoryPermission new_perm) {
+                /* We must either be locked or have a zero lock count. */
+                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked || this->ipc_lock_count == 0);
+
+                /* Lock. */
+                const u16 new_lock_count = ++this->ipc_lock_count;
+                MESOSPHERE_ABORT_UNLESS(new_lock_count > 0);
+
+                /* If this is our first lock, update our permissions. */
+                if (new_lock_count == 1) {
+                    MESOSPHERE_ASSERT(this->original_perm == KMemoryPermission_None);
+                    MESOSPHERE_ASSERT((this->perm | new_perm | KMemoryPermission_NotMapped) == (this->perm | KMemoryPermission_NotMapped));
+                    MESOSPHERE_ASSERT((this->perm & KMemoryPermission_UserExecute) != KMemoryPermission_UserExecute || (new_perm == KMemoryPermission_UserRead));
+                    this->original_perm = this->perm;
+                    this->perm = static_cast<KMemoryPermission>((new_perm & KMemoryPermission_IpcLockChangeMask) | (this->original_perm & ~KMemoryPermission_IpcLockChangeMask));
+                }
+                this->attribute = static_cast<KMemoryAttribute>(this->attribute | KMemoryAttribute_IpcLocked);
+            }
+
+            constexpr void UnlockForIpc(KMemoryPermission new_perm) {
+                /* We must be locked. */
+                MESOSPHERE_ASSERT((this->attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked);
+
+                /* Unlock. */
+                const u16 old_lock_count = this->ipc_lock_count--;
+                MESOSPHERE_ABORT_UNLESS(old_lock_count > 0);
+
+                /* If this is our last unlock, update our permissions. 
*/ + if (old_lock_count == 1) { + MESOSPHERE_ASSERT(this->original_perm != KMemoryPermission_None); + this->perm = this->original_perm; + this->original_perm = KMemoryPermission_None; + this->attribute = static_cast(this->attribute & ~KMemoryAttribute_IpcLocked); + } + } }; static_assert(std::is_trivially_destructible::value); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp index 19e992611..a88f3f350 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_block_manager.hpp @@ -96,6 +96,8 @@ namespace ams::kern { void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr); void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm), KMemoryPermission perm); + void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr); + iterator FindIterator(KProcessAddress address) const { return this->memory_block_tree.find(KMemoryBlock(address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None)); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index 22b593a42..d70485b62 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -17,6 +17,12 @@ #include #include +#if defined(ATMOSPHERE_BOARD_NINTENDO_NX) + #include +#else + #error "Unknown board for KMemoryLayout" +#endif + namespace ams::kern { constexpr size_t KernelAslrAlignment = 2_MB; @@ -268,6 +274,16 @@ namespace ams::kern { MESOSPHERE_INIT_ABORT(); } + iterator TryFindFirstRegionByType(u32 type_id) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->GetType() == type_id) { + return it; + } + } + + return this->end(); + } + iterator FindFirstDerivedRegion(u32 type_id) { for (auto it = this->begin(); it != this->end(); it++) { if (it->IsDerivedFrom(type_id)) { @@ -277,6 +293,16 @@ namespace ams::kern { MESOSPHERE_INIT_ABORT(); } + iterator TryFindFirstDerivedRegion(u32 type_id) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->IsDerivedFrom(type_id)) { + return it; + } + } + + return this->end(); + } + DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const { DerivedRegionExtents extents; @@ -504,6 +530,33 @@ namespace ams::kern { return *GetVirtualLinearMemoryRegionTree().FindContainingRegion(GetInteger(address)); } + static NOINLINE const KMemoryRegion *TryGetKernelTraceBufferRegion() { + auto &tree = GetPhysicalMemoryRegionTree(); + if (KMemoryRegionTree::const_iterator it = tree.TryFindFirstDerivedRegion(KMemoryRegionType_KernelTraceBuffer); it != tree.end()) { + return std::addressof(*it); + } else { + return nullptr; + } + } + + static NOINLINE const KMemoryRegion *TryGetOnMemoryBootImageRegion() { + auto &tree = GetPhysicalMemoryRegionTree(); + if (KMemoryRegionTree::const_iterator it = tree.TryFindFirstDerivedRegion(KMemoryRegionType_OnMemoryBootImage); it != 
tree.end()) { + return std::addressof(*it); + } else { + return nullptr; + } + } + + static NOINLINE const KMemoryRegion *TryGetDTBRegion() { + auto &tree = GetPhysicalMemoryRegionTree(); + if (KMemoryRegionTree::const_iterator it = tree.TryFindFirstDerivedRegion(KMemoryRegionType_DTB); it != tree.end()) { + return std::addressof(*it); + } else { + return nullptr; + } + } + static NOINLINE bool IsHeapPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) { auto &tree = GetPhysicalLinearMemoryRegionTree(); KMemoryRegionTree::const_iterator it = tree.end(); @@ -546,6 +599,48 @@ namespace ams::kern { return false; } + static NOINLINE bool IsLinearMappedPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, const KMemoryRegion *hint = nullptr) { + auto &tree = GetPhysicalLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionAttr_LinearMapped)) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + return false; + } + + static NOINLINE bool IsLinearMappedPhysicalAddress(const KMemoryRegion **out, KPhysicalAddress address, size_t size, const KMemoryRegion *hint = nullptr) { + auto &tree = GetPhysicalLinearMemoryRegionTree(); + KMemoryRegionTree::const_iterator it = tree.end(); + if (hint != nullptr) { + it = tree.iterator_to(*hint); + } + if (it == tree.end() || !it->Contains(GetInteger(address))) { + it = tree.FindContainingRegion(GetInteger(address)); + } + if (it != tree.end() && it->IsDerivedFrom(KMemoryRegionAttr_LinearMapped)) { + const uintptr_t last_address = GetInteger(address) + size - 1; + do { + if (last_address <= it->GetLastAddress()) { + if (out) { + *out = std::addressof(*it); + } + return true; + } + it++; + } while (it != tree.end() && it->IsDerivedFrom(KMemoryRegionAttr_LinearMapped)); + } + return false; + } + static NOINLINE bool IsHeapVirtualAddress(const KMemoryRegion **out, KVirtualAddress address, const KMemoryRegion *hint = nullptr) { auto &tree = GetVirtualLinearMemoryRegionTree(); KMemoryRegionTree::const_iterator it = tree.end(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp index 8c17c54e0..4193633b2 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -53,6 +53,12 @@ namespace ams::kern { class Impl { private: using RefCount = u16; + public: + static size_t CalculateMetadataOverheadSize(size_t region_size); + + static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) { + return (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64); + } private: KPageHeap heap; RefCount *page_reference_counts; @@ -68,11 +74,19 @@ namespace ams::kern { KVirtualAddress AllocateBlock(s32 index, bool random) { return this->heap.AllocateBlock(index, random); } void Free(KVirtualAddress addr, size_t num_pages) { this->heap.Free(addr, num_pages); } - void TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages); + void InitializeOptimizedMemory() { std::memset(GetVoidPointer(this->metadata_region), 0, CalculateOptimizedProcessOverheadSize(this->heap.GetSize())); 
} + void TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages); + size_t TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages); + + size_t ProcessOptimizedAllocation(bool *out_any_new, KVirtualAddress block, size_t num_pages, u8 fill_pattern); + + constexpr Pool GetPool() const { return this->pool; } constexpr size_t GetSize() const { return this->heap.GetSize(); } constexpr KVirtualAddress GetEndAddress() const { return this->heap.GetEndAddress(); } + size_t GetFreeSize() const { return this->heap.GetFreeSize(); } + constexpr void SetNext(Impl *n) { this->next = n; } constexpr void SetPrev(Impl *n) { this->prev = n; } constexpr Impl *GetNext() const { return this->next; } @@ -125,8 +139,6 @@ namespace ams::kern { this->Free(this->heap.GetAddress() + free_start * PageSize, free_count); } } - public: - static size_t CalculateMetadataOverheadSize(size_t region_size); }; private: KLightLock pool_locks[Pool_Count]; @@ -153,7 +165,7 @@ namespace ams::kern { } } - Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool optimize, bool random); + Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random); public: KMemoryManager() : pool_locks(), pool_managers_head(), pool_managers_tail(), managers(), num_managers(), optimized_process_ids(), has_optimized_process() @@ -163,8 +175,12 @@ namespace ams::kern { NOINLINE void Initialize(KVirtualAddress metadata_region, size_t metadata_region_size); + NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool); + NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool); + NOINLINE KVirtualAddress AllocateContinuous(size_t num_pages, size_t align_pages, u32 option); NOINLINE Result Allocate(KPageGroup *out, size_t num_pages, u32 option); + NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern); void Open(KVirtualAddress address, size_t num_pages) { /* Repeatedly open references until we've done so for all pages. 
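CalculateOptimizedProcessOverheadSize above reserves one tracking bit per page, rounded up to whole 64-bit words. A self-contained restatement of that arithmetic, assuming 4 KiB pages and a local align helper in place of util::AlignUp:

#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;  /* 4 KiB pages. */

constexpr std::size_t AlignUp(std::size_t value, std::size_t align) {
    return ((value + align - 1) / align) * align;
}

constexpr std::size_t OptimizedProcessOverheadSize(std::size_t region_size) {
    /* One bit per page, padded out to whole u64 words. */
    const std::size_t num_pages = region_size / PageSize;
    return (AlignUp(num_pages, 64) / 64) * sizeof(std::uint64_t);
}

/* A 4 MiB region holds 1024 pages -> 16 u64 words -> 128 bytes of bitmap. */
static_assert(OptimizedProcessOverheadSize(4 * 1024 * 1024) == 128);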
*/ @@ -204,6 +220,23 @@ namespace ams::kern { } return total; } + + size_t GetFreeSize() { + size_t total = 0; + for (size_t i = 0; i < this->num_managers; i++) { + total += this->managers[i].GetFreeSize(); + } + return total; + } + + size_t GetFreeSize(Pool pool) { + constexpr Direction GetSizeDirection = Direction_FromFront; + size_t total = 0; + for (auto *manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; manager = this->GetNextManager(manager, GetSizeDirection)) { + total += manager->GetFreeSize(); + } + return total; + } public: static size_t CalculateMetadataOverheadSize(size_t region_size) { return Impl::CalculateMetadataOverheadSize(region_size); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp index 09ab27324..b57dd233c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp @@ -49,6 +49,11 @@ namespace ams::kern { return Delete(obj.GetPointerUnsafe(), name); } + + template requires std::derived_from + static KScopedAutoObject Find(const char *name) { + return Find(name); + } private: static KScopedAutoObject FindImpl(const char *name); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp index 47148acd8..76f527991 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp @@ -140,11 +140,14 @@ namespace ams::kern { constexpr size_t GetSize() const { return this->heap_size; } constexpr KVirtualAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); } constexpr size_t GetPageOffset(KVirtualAddress block) const { return (block - this->GetAddress()) / PageSize; } + constexpr size_t GetPageOffsetToEnd(KVirtualAddress block) const { return (this->GetEndAddress() - block) / PageSize; } void Initialize(KVirtualAddress heap_address, size_t heap_size, KVirtualAddress metadata_address, size_t metadata_size) { return Initialize(heap_address, heap_size, metadata_address, metadata_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts); } + size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; } + void UpdateUsedSize() { this->used_size = this->heap_size - (this->GetNumFreePages() * PageSize); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp index 2298dd9dd..f0a43b4bb 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp @@ -52,7 +52,6 @@ namespace ams::kern { OperationType_Unmap = 2, OperationType_ChangePermissions = 3, OperationType_ChangePermissionsAndRefresh = 4, - /* TODO: perm/attr operations */ }; static constexpr size_t MaxPhysicalMapAlignment = 1_GB; @@ -134,7 +133,7 @@ namespace ams::kern { KProcessAddress code_region_start; KProcessAddress code_region_end; size_t max_heap_size; - size_t max_physical_memory_size; + size_t mapped_physical_memory_size; size_t mapped_unsafe_physical_memory; mutable KLightLock general_lock; mutable KLightLock map_physical_memory_lock; @@ -157,7 +156,7 @@ namespace ams::kern { address_space_start(), address_space_end(), heap_region_start(), heap_region_end(), current_heap_end(), alias_region_start(), 
alias_region_end(), stack_region_start(), stack_region_end(), kernel_map_region_start(), kernel_map_region_end(), alias_code_region_start(), alias_code_region_end(), code_region_start(), code_region_end(), - max_heap_size(), max_physical_memory_size(),mapped_unsafe_physical_memory(), general_lock(), map_physical_memory_lock(), + max_heap_size(), mapped_physical_memory_size(), mapped_unsafe_physical_memory(), general_lock(), map_physical_memory_lock(), impl(), memory_block_manager(), allocate_option(), address_space_width(), is_kernel(), enable_aslr(), memory_block_slab_manager(), block_info_manager(), cached_physical_linear_region(), cached_physical_heap_region(), cached_virtual_heap_region(), heap_fill_value(), ipc_fill_value(), stack_fill_value() @@ -181,6 +180,15 @@ namespace ams::kern { return this->address_space_start <= addr && addr < addr + size && addr + size - 1 <= this->address_space_end - 1; } + constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const { + return this->Contains(addr, size) && this->alias_region_start <= addr && addr + size - 1 <= this->alias_region_end - 1; + } + + bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { + /* Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the alias code region. */ + return this->CanContain(addr, size, KMemoryState_AliasCode); + } + KProcessAddress GetRegionAddress(KMemoryState state) const; size_t GetRegionSize(KMemoryState state) const; bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const; @@ -194,12 +202,30 @@ namespace ams::kern { bool IsLockedByCurrentThread() const { return this->general_lock.IsLockedByCurrentThread(); } + bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsLinearMappedPhysicalAddress(std::addressof(this->cached_physical_linear_region), phys_addr, this->cached_physical_linear_region); + } + + bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsLinearMappedPhysicalAddress(std::addressof(this->cached_physical_linear_region), phys_addr, size, this->cached_physical_linear_region); + } + bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region); } + bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) { + MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread()); + + return KMemoryLayout::IsHeapPhysicalAddress(std::addressof(this->cached_physical_heap_region), phys_addr, this->cached_physical_heap_region); + } + bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); @@ -225,13 +251,21 @@ namespace ams::kern { constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 
1 : 4; } ALWAYS_INLINE KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const; + Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const; + Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const; Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const; Result CheckMemoryState(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const { return this->CheckMemoryState(nullptr, nullptr, nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr); } + Result LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr); + Result UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg); + Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const; + + Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, KMemoryState state) const; + Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties properties); Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll); @@ -239,6 +273,10 @@ namespace ams::kern { bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages); NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); + + Result SetupForIpcClient(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission test_perm, KMemoryState dst_state); + Result SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send); + Result CleanupForIpcClientOnServerSetupFailure(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission test_perm); public: bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const { return this->GetImpl().GetPhysicalAddress(out, virt_addr); @@ -248,9 +286,17 @@ namespace ams::kern { Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm); Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm); + Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr); Result SetHeapSize(KProcessAddress *out, size_t size); Result SetMaxHeapSize(size_t size); Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const; + Result 
QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const; + Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { return this->QueryMappingImpl(out, address, size, KMemoryState_Static); } + Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { return this->QueryMappingImpl(out, address, size, KMemoryState_Io); } + Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); + Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size); Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm); Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm); @@ -267,12 +313,62 @@ namespace ams::kern { return this->MapPages(out_addr, num_pages, PageSize, Null, false, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm); } + Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm); Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state); + Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm); Result MapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm); Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state); Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr); + Result MakeAndOpenPageGroupContiguous(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr); + + Result InvalidateProcessDataCache(KProcessAddress address, size_t size); + + Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size); + Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size); + + Result LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned); + Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size); + Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size); + Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size); + + Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm); + Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg); + Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size); + Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg); + + Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr); + Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr); + Result 
CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr); + Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr); + Result CopyMemoryFromHeapToHeap(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr); + Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr); + + Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KPageTableBase &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send); + Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process); + Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state); + + Result MapPhysicalMemory(KProcessAddress address, size_t size); + Result UnmapPhysicalMemory(KProcessAddress address, size_t size); + + Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); + Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size); + + void DumpTable() const { + KScopedLightLock lk(this->general_lock); + this->GetImpl().Dump(GetInteger(this->address_space_start), this->address_space_end - this->address_space_start); + } + + void DumpMemoryBlocks() const { + KScopedLightLock lk(this->general_lock); + this->DumpMemoryBlocksLocked(); + } + + void DumpMemoryBlocksLocked() const { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + this->memory_block_manager.DumpBlocks(); + } public: KProcessAddress GetAddressSpaceStart() const { return this->address_space_start; } KProcessAddress GetHeapRegionStart() const { return this->heap_region_start; } @@ -287,29 +383,38 @@ namespace ams::kern { size_t GetStackRegionSize() const { return this->stack_region_end - this->stack_region_start; } size_t GetKernelMapRegionSize() const { return this->kernel_map_region_end - this->kernel_map_region_start; } size_t GetAliasCodeRegionSize() const { return this->alias_code_region_end - this->alias_code_region_start; } + + size_t GetNormalMemorySize() const { + /* Lock the table. 
*/ + KScopedLightLock lk(this->general_lock); + + return (this->current_heap_end - this->heap_region_start) + this->mapped_physical_memory_size; + } + + u32 GetAllocateOption() const { return this->allocate_option; } public: - static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress addr) { + static ALWAYS_INLINE KVirtualAddress GetLinearMappedVirtualAddress(KPhysicalAddress addr) { return KMemoryLayout::GetLinearVirtualAddress(addr); } - static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress addr) { + static ALWAYS_INLINE KPhysicalAddress GetLinearMappedPhysicalAddress(KVirtualAddress addr) { return KMemoryLayout::GetLinearPhysicalAddress(addr); } static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) { - return GetLinearVirtualAddress(addr); + return GetLinearMappedVirtualAddress(addr); } static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) { - return GetLinearPhysicalAddress(addr); + return GetLinearMappedPhysicalAddress(addr); } static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) { - return GetLinearVirtualAddress(addr); + return GetLinearMappedVirtualAddress(addr); } static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) { - return GetLinearPhysicalAddress(addr); + return GetLinearMappedPhysicalAddress(addr); } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp index 65a4609ff..36406d405 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp @@ -22,6 +22,9 @@ namespace ams::kern { + class KServerSession; + class KLightServerSession; + class KPort final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KPort, KAutoObject); private: @@ -50,7 +53,8 @@ namespace ams::kern { uintptr_t GetName() const { return this->name; } bool IsLight() const { return this->is_light; } - /* TODO: More of KPort */ + Result EnqueueSession(KServerSession *session); + Result EnqueueSession(KLightServerSession *session); KClientPort &GetClientPort() { return this->client; } KServerPort &GetServerPort() { return this->server; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp index 033c59373..a9ad07006 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp @@ -400,7 +400,7 @@ namespace ams::kern { /* Remove from the suggested queue and add to the scheduled queue for the new core. 
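As the surrounding ChangeCore comment and one-line fix show, a thread is scheduled on exactly its active core and merely suggested on its other allowed cores; moving it to a new core therefore removes it from new_core's suggested queue (hence the change from prev_core to new_core just below) before pushing it onto new_core's scheduled queue. A toy model of that invariant, with illustrative types rather than the kernel's:

#include <array>
#include <cstddef>
#include <list>

struct Thread { int id; };

template <std::size_t NumCores>
struct PriorityQueueModel {
    std::array<std::list<Thread *>, NumCores> scheduled;  /* the thread's active core */
    std::array<std::list<Thread *>, NumCores> suggested;  /* its other allowed cores  */

    void ChangeCore(std::size_t prev_core, std::size_t new_core, Thread *t) {
        scheduled[prev_core].remove(t);    /* stop running it on the old core             */
        suggested[prev_core].push_back(t); /* the old core may still pick it up later     */
        suggested[new_core].remove(t);     /* it was only a suggestion on the new core... */
        scheduled[new_core].push_back(t);  /* ...and is now actually scheduled there      */
    }
};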
*/ if (new_core >= 0) { - this->suggested_queue.Remove(priority, prev_core, member); + this->suggested_queue.Remove(priority, new_core, member); if (to_front) { this->scheduled_queue.PushFront(priority, new_core, member); } else { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp index 897be0840..48e6bfe46 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -48,6 +48,8 @@ namespace ams::kern { }; using ThreadList = util::IntrusiveListMemberTraits<&KThread::process_list_node>::ListType; + + static constexpr size_t AslrAlignment = KernelAslrAlignment; private: using SharedMemoryInfoList = util::IntrusiveListBaseTraits::ListType; using TLPTree = util::IntrusiveRedBlackTreeBaseTraits::TreeType; @@ -64,7 +66,7 @@ namespace ams::kern { size_t system_resource_num_pages{}; size_t memory_release_hint{}; State state{}; - KLightLock lock{}; + KLightLock state_lock{}; KLightLock list_lock{}; KConditionVariable cond_var{}; KAddressArbiter address_arbiter{}; @@ -118,38 +120,127 @@ namespace ams::kern { KPageTableManager page_table_manager{}; private: Result Initialize(const ams::svc::CreateProcessParameter ¶ms); + + void StartTermination(); + void FinishTermination(); public: KProcess() { /* ... */ } virtual ~KProcess() { /* ... */ } Result Initialize(const ams::svc::CreateProcessParameter ¶ms, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool); + Result Initialize(const ams::svc::CreateProcessParameter ¶ms, svc::KUserPointer caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool); + void Exit(); constexpr const char *GetName() const { return this->name; } + constexpr ams::svc::ProgramId GetProgramId() const { return this->program_id; } + constexpr u64 GetProcessId() const { return this->process_id; } + constexpr State GetState() const { return this->state; } + constexpr u64 GetCoreMask() const { return this->capabilities.GetCoreMask(); } constexpr u64 GetPriorityMask() const { return this->capabilities.GetPriorityMask(); } + constexpr s32 GetIdealCoreId() const { return this->ideal_core_id; } constexpr void SetIdealCoreId(s32 core_id) { this->ideal_core_id = core_id; } + constexpr bool CheckThreadPriority(s32 prio) const { return ((1ul << prio) & this->GetPriorityMask()) != 0; } + + constexpr u32 GetCreateProcessFlags() const { return this->flags; } + constexpr bool Is64Bit() const { return this->flags & ams::svc::CreateProcessFlag_Is64Bit; } constexpr KProcessAddress GetEntryPoint() const { return this->code_address; } - constexpr bool IsSuspended() const { - return this->is_suspended; + constexpr KMemoryManager::Pool GetMemoryPool() const { return this->memory_pool; } + + constexpr u64 GetRandomEntropy(size_t i) const { return this->entropy[i]; } + + constexpr bool IsApplication() const { return this->is_application; } + + constexpr bool IsSuspended() const { return this->is_suspended; } + constexpr void SetSuspended(bool suspended) { this->is_suspended = suspended; } + + Result Terminate(); + + constexpr bool IsTerminated() const { + return this->state == State_Terminated; } - KThread *GetPreemptionStatePinnedThread(s32 core_id) const { + constexpr bool IsAttachedToDebugger() const { + return this->attached_object != nullptr; + } + + constexpr bool IsPermittedInterrupt(int32_t interrupt_id) const { + return 
this->capabilities.IsPermittedInterrupt(interrupt_id); + } + + constexpr bool IsPermittedDebug() const { + return this->capabilities.IsPermittedDebug(); + } + + constexpr bool CanForceDebug() const { + return this->capabilities.CanForceDebug(); + } + + u32 GetAllocateOption() const { return this->page_table.GetAllocateOption(); } + + ThreadList &GetThreadList() { return this->thread_list; } + const ThreadList &GetThreadList() const { return this->thread_list; } + + constexpr void *GetDebugObject() const { return this->attached_object; } + KProcess::State SetDebugObject(void *debug_object); + void ClearDebugObject(KProcess::State state); + + bool EnterJitDebug(ams::svc::DebugEvent event, ams::svc::DebugException exception, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0); + + KEventInfo *GetJitDebugInfo(); + void ClearJitDebugInfo(); + + bool EnterUserException(); + bool LeaveUserException(); + bool ReleaseUserException(KThread *thread); + + KThread *GetPinnedThread(s32 core_id) const { MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast(cpu::NumCores)); return this->pinned_threads[core_id]; } + void PinThread(s32 core_id, KThread *thread) { + MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast(cpu::NumCores)); + MESOSPHERE_ASSERT(thread != nullptr); + MESOSPHERE_ASSERT(this->pinned_threads[core_id] == nullptr); + this->pinned_threads[core_id] = thread; + } + + void UnpinThread(s32 core_id, KThread *thread) { + MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast(cpu::NumCores)); + MESOSPHERE_ASSERT(thread != nullptr); + MESOSPHERE_ASSERT(this->pinned_threads[core_id] == thread); + this->pinned_threads[core_id] = nullptr; + } + void CopySvcPermissionsTo(KThread::StackParameters &sp) { this->capabilities.CopySvcPermissionsTo(sp); } + void CopyPinnedSvcPermissionsTo(KThread::StackParameters &sp) { + this->capabilities.CopyPinnedSvcPermissionsTo(sp); + } + + void CopyUnpinnedSvcPermissionsTo(KThread::StackParameters &sp) { + this->capabilities.CopyUnpinnedSvcPermissionsTo(sp); + } + + void CopyEnterExceptionSvcPermissionsTo(KThread::StackParameters &sp) { + this->capabilities.CopyEnterExceptionSvcPermissionsTo(sp); + } + + void CopyLeaveExceptionSvcPermissionsTo(KThread::StackParameters &sp) { + this->capabilities.CopyLeaveExceptionSvcPermissionsTo(sp); + } + constexpr KResourceLimit *GetResourceLimit() const { return this->resource_limit; } bool ReserveResource(ams::svc::LimitableResource which, s64 value); @@ -157,27 +248,114 @@ namespace ams::kern { void ReleaseResource(ams::svc::LimitableResource which, s64 value); void ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint); + constexpr KLightLock &GetStateLock() { return this->state_lock; } + constexpr KLightLock &GetListLock() { return this->list_lock; } + constexpr KProcessPageTable &GetPageTable() { return this->page_table; } constexpr const KProcessPageTable &GetPageTable() const { return this->page_table; } constexpr KHandleTable &GetHandleTable() { return this->handle_table; } constexpr const KHandleTable &GetHandleTable() const { return this->handle_table; } + KWaitObject *GetWaitObjectPointer() { return std::addressof(this->wait_object); } + + size_t GetUsedUserPhysicalMemorySize() const; + size_t GetTotalUserPhysicalMemorySize() const; + size_t GetUsedNonSystemUserPhysicalMemorySize() const; + size_t GetTotalNonSystemUserPhysicalMemorySize() const; + + Result AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size); + void 
RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size); + Result CreateThreadLocalRegion(KProcessAddress *out); + Result DeleteThreadLocalRegion(KProcessAddress addr); void *GetThreadLocalRegionPointer(KProcessAddress addr); + constexpr KProcessAddress GetProcessLocalRegionAddress() const { return this->plr_address; } + void AddCpuTime(s64 diff) { this->cpu_time += diff; } + + constexpr s64 GetScheduledCount() const { return this->schedule_count; } void IncrementScheduledCount() { ++this->schedule_count; } void IncrementThreadCount(); void DecrementThreadCount(); + size_t GetTotalSystemResourceSize() const { return this->system_resource_num_pages * PageSize; } + size_t GetUsedSystemResourceSize() const { + if (this->system_resource_num_pages == 0) { + return 0; + } + return this->dynamic_page_manager.GetUsed() * PageSize; + } + + void SetRunningThread(s32 core, KThread *thread, u64 idle_count) { + this->running_threads[core] = thread; + this->running_thread_idle_counts[core] = idle_count; + } + + void ClearRunningThread(KThread *thread) { + for (size_t i = 0; i < util::size(this->running_threads); ++i) { + if (this->running_threads[i] == thread) { + this->running_threads[i] = nullptr; + } + } + } + + constexpr KThread *GetRunningThread(s32 core) const { return this->running_threads[core]; } + constexpr u64 GetRunningThreadIdleCount(s32 core) const { return this->running_thread_idle_counts[core]; } + void RegisterThread(KThread *thread); void UnregisterThread(KThread *thread); Result Run(s32 priority, size_t stack_size); - void SetPreemptionState(); + Result Reset(); + + void SetDebugBreak() { + if (this->state == State_RunningAttached) { + this->ChangeState(State_DebugBreak); + } + } + + void SetAttached() { + if (this->state == State_DebugBreak) { + this->ChangeState(State_RunningAttached); + } + } + + Result SetActivity(ams::svc::ProcessActivity activity); + + void PinCurrentThread(); + + Result SignalToAddress(KProcessAddress address) { + return this->cond_var.SignalToAddress(address); + } + + Result WaitForAddress(ams::svc::Handle handle, KProcessAddress address, u32 tag) { + return this->cond_var.WaitForAddress(handle, address, tag); + } + + void SignalConditionVariable(uintptr_t cv_key, int32_t count) { + return this->cond_var.Signal(cv_key, count); + } + + Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) { + return this->cond_var.Wait(address, cv_key, tag, ns); + } + + Result SignalAddressArbiter(uintptr_t address, ams::svc::SignalType signal_type, s32 value, s32 count) { + return this->address_arbiter.SignalToAddress(address, signal_type, value, count); + } + + Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s32 value, s64 timeout) { + return this->address_arbiter.WaitForAddress(address, arb_type, value, timeout); + } + + Result GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer out_thread_ids, s32 max_out_count); + + static KProcess *GetProcessFromId(u64 process_id); + static Result GetProcessList(s32 *out_num_processes, ams::kern::svc::KUserPointer out_process_ids, s32 max_out_count); static void Switch(KProcess *cur_process, KProcess *next_process) { /* Set the current process pointer. 
*/ diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp index 2e8247f5a..27ea0a1ef 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp @@ -70,6 +70,11 @@ namespace ams::kern { NOINLINE void Initialize(KThread *idle_thread); NOINLINE void Activate(); + ALWAYS_INLINE void SetInterruptTaskRunnable() { + this->state.interrupt_task_thread_runnable = true; + this->state.needs_scheduling = true; + } + ALWAYS_INLINE void RequestScheduleOnInterrupt() { SetSchedulerUpdateNeeded(); @@ -77,6 +82,22 @@ namespace ams::kern { this->ScheduleOnInterrupt(); } } + + ALWAYS_INLINE u64 GetIdleCount() const { + return this->state.idle_count; + } + + ALWAYS_INLINE KThread *GetIdleThread() const { + return this->idle_thread; + } + + ALWAYS_INLINE KThread *GetPreviousThread() const { + return this->prev_thread; + } + + ALWAYS_INLINE s64 GetLastContextSwitchTime() const { + return this->last_context_switch_time; + } private: /* Static private API. */ static ALWAYS_INLINE bool IsSchedulerUpdateNeeded() { return s_scheduler_update_needed; } @@ -85,13 +106,13 @@ namespace ams::kern { static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; } static NOINLINE u64 UpdateHighestPriorityThreadsImpl(); + + static NOINLINE void InterruptTaskThreadToRunnable(); public: /* Static public API. */ static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; } static ALWAYS_INLINE bool IsSchedulerLockedByCurrentThread() { return s_scheduler_lock.IsLockedByCurrentThread(); } - static NOINLINE void SetInterruptTaskThreadRunnable(); - static ALWAYS_INLINE void DisableScheduling() { MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0); GetCurrentThread().DisableDispatch(); @@ -116,12 +137,20 @@ namespace ams::kern { } } + static NOINLINE void ClearPreviousThread(KThread *thread); + + static NOINLINE void PinCurrentThread(KProcess *cur_process); + static NOINLINE void UnpinCurrentThread(KProcess *cur_process); + static NOINLINE void OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state); static NOINLINE void OnThreadPriorityChanged(KThread *thread, s32 old_priority); static NOINLINE void OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core); - /* TODO: Yield operations */ static NOINLINE void RotateScheduledQueue(s32 priority, s32 core_id); + + static NOINLINE void YieldWithoutCoreMigration(); + static NOINLINE void YieldWithCoreMigration(); + static NOINLINE void YieldToAnyThread(); private: /* Instanced private API. */ void ScheduleImpl(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp index c8a429a5d..e5862aa51 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp @@ -38,6 +38,11 @@ namespace ams::kern { virtual ~KServerPort() { /* ... */ } void Initialize(KPort *parent); + void EnqueueSession(KServerSession *session); + void EnqueueSession(KLightServerSession *session); + + KServerSession *AcceptSession(); + KLightServerSession *AcceptLightSession(); constexpr const KPort *GetParent() const { return this->parent; } @@ -46,11 +51,8 @@ namespace ams::kern { /* Overridden virtual functions. 
*/ virtual void Destroy() override; virtual bool IsSignaled() const override; - - /* TODO: More of KClientPort. */ private: void CleanupSessions(); - /* TODO: This is a placeholder definition. */ }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp index 70d0583e9..136a5dc6f 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_server_session.hpp @@ -36,13 +36,23 @@ namespace ams::kern { constexpr KServerSession() : parent(), request_list(), current_request(), lock() { /* ... */ } virtual ~KServerSession() { /* ... */ } - void Initialize(KSession *parent); + virtual void Destroy() override; + + void Initialize(KSession *p) { this->parent = p; } constexpr const KSession *GetParent() const { return this->parent; } - virtual bool IsSignaled() const override { MESOSPHERE_UNIMPLEMENTED(); } + virtual bool IsSignaled() const override; - /* TODO: More of KServerSession. */ + Result OnRequest(KSessionRequest *request); + + Result ReceiveRequest(uintptr_t message, uintptr_t buffer_size, KPhysicalAddress message_paddr); + Result SendReply(uintptr_t message, uintptr_t buffer_size, KPhysicalAddress message_paddr); + + void OnClientClosed(); + private: + bool IsSignaledImpl() const; + void CleanupRequests(); }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp index 0ee803b05..b67f81e3a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp @@ -51,12 +51,21 @@ namespace ams::kern { virtual ~KSession() { /* ... */ } + void Initialize(KClientPort *client_port, uintptr_t name); + virtual void Finalize() override; + virtual bool IsInitialized() const override { return this->initialized; } virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(this->process); } static void PostDestroy(uintptr_t arg); - /* TODO: This is a placeholder definition. */ + void OnServerClosed(); + void OnClientClosed(); + + bool IsServerClosed() const { return this->state != State::Normal; } + bool IsClientClosed() const { return this->state != State::Normal; } + + Result OnRequest(KSessionRequest *request) { return this->server.OnRequest(request); } KClientSession &GetClientSession() { return this->client; } KServerSession &GetServerSession() { return this->server; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp index 95a1d0a2c..03f1d6ef6 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp @@ -17,13 +17,209 @@ #include #include #include +#include +#include +#include +#include namespace ams::kern { class KSessionRequest final : public KSlabAllocated, public KAutoObject, public util::IntrusiveListBaseNode { MESOSPHERE_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject); public: - /* TODO: This is a placeholder definition. 
*/ + class SessionMappings { + private: + static constexpr size_t NumStaticMappings = 8; + + class Mapping { + private: + KProcessAddress client_address; + KProcessAddress server_address; + size_t size; + KMemoryState state; + public: + constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) { + this->client_address = c; + this->server_address = s; + this->size = sz; + this->state = st; + } + + constexpr KProcessAddress GetClientAddress() const { return this->client_address; } + constexpr KProcessAddress GetServerAddress() const { return this->server_address; } + constexpr size_t GetSize() const { return this->size; } + constexpr KMemoryState GetMemoryState() const { return this->state; } + }; + private: + Mapping static_mappings[NumStaticMappings]; + Mapping *mappings; + u8 num_send; + u8 num_recv; + u8 num_exch; + public: + constexpr explicit SessionMappings() : static_mappings(), mappings(), num_send(), num_recv(), num_exch() { /* ... */ } + + void Initialize() { /* ... */ } + void Finalize(); + + constexpr size_t GetSendCount() const { return this->num_send; } + constexpr size_t GetReceiveCount() const { return this->num_recv; } + constexpr size_t GetExchangeCount() const { return this->num_exch; } + + Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state); + Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state); + Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state); + + constexpr KProcessAddress GetSendClientAddress(size_t i) const { return GetSendMapping(i).GetClientAddress(); } + constexpr KProcessAddress GetSendServerAddress(size_t i) const { return GetSendMapping(i).GetServerAddress(); } + constexpr size_t GetSendSize(size_t i) const { return GetSendMapping(i).GetSize(); } + constexpr KMemoryState GetSendMemoryState(size_t i) const { return GetSendMapping(i).GetMemoryState(); } + + constexpr KProcessAddress GetReceiveClientAddress(size_t i) const { return GetReceiveMapping(i).GetClientAddress(); } + constexpr KProcessAddress GetReceiveServerAddress(size_t i) const { return GetReceiveMapping(i).GetServerAddress(); } + constexpr size_t GetReceiveSize(size_t i) const { return GetReceiveMapping(i).GetSize(); } + constexpr KMemoryState GetReceiveMemoryState(size_t i) const { return GetReceiveMapping(i).GetMemoryState(); } + + constexpr KProcessAddress GetExchangeClientAddress(size_t i) const { return GetExchangeMapping(i).GetClientAddress(); } + constexpr KProcessAddress GetExchangeServerAddress(size_t i) const { return GetExchangeMapping(i).GetServerAddress(); } + constexpr size_t GetExchangeSize(size_t i) const { return GetExchangeMapping(i).GetSize(); } + constexpr KMemoryState GetExchangeMemoryState(size_t i) const { return GetExchangeMapping(i).GetMemoryState(); } + private: + Result PushMap(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state, size_t index); + + constexpr const Mapping &GetSendMapping(size_t i) const { + MESOSPHERE_ASSERT(i < this->num_send); + + const size_t index = i; + if (index < NumStaticMappings) { + return this->static_mappings[index]; + } else { + return this->mappings[index - NumStaticMappings]; + } + } + + constexpr const Mapping &GetReceiveMapping(size_t i) const { + MESOSPHERE_ASSERT(i < this->num_recv); + + const size_t index = this->num_send + i; + if (index < NumStaticMappings) { + return this->static_mappings[index]; + } else { + return this->mappings[index 
- NumStaticMappings]; + } + } + + constexpr const Mapping &GetExchangeMapping(size_t i) const { + MESOSPHERE_ASSERT(i < this->num_exch); + + const size_t index = this->num_send + this->num_recv + i; + if (index < NumStaticMappings) { + return this->static_mappings[index]; + } else { + return this->mappings[index - NumStaticMappings]; + } + } + + + }; + private: + SessionMappings mappings; + KThread *thread; + KProcess *server; + KWritableEvent *event; + uintptr_t address; + size_t size; + public: + constexpr KSessionRequest() : mappings(), thread(), server(), event(), address(), size() { /* ... */ } + virtual ~KSessionRequest() { /* ... */ } + + static KSessionRequest *Create() { + KSessionRequest *req = KSessionRequest::Allocate(); + if (req != nullptr) { + KAutoObject::Create(req); + } + return req; + } + + virtual void Destroy() override { + this->Finalize(); + KSessionRequest::Free(this); + } + + void Initialize(KWritableEvent *event, uintptr_t address, size_t size) { + this->mappings.Initialize(); + + this->thread = std::addressof(GetCurrentThread()); + this->event = event; + this->address = address; + this->size = size; + + this->thread->Open(); + if (this->event != nullptr) { + this->event->Open(); + } + } + + virtual void Finalize() override { + this->mappings.Finalize(); + + if (this->thread) { + this->thread->Close(); + } + if (this->event) { + this->event->Close(); + } + if (this->server) { + this->server->Close(); + } + } + + static void PostDestroy(uintptr_t arg) { /* ... */ } + + constexpr KThread *GetThread() const { return this->thread; } + constexpr KWritableEvent *GetEvent() const { return this->event; } + constexpr uintptr_t GetAddress() const { return this->address; } + constexpr size_t GetSize() const { return this->size; } + constexpr KProcess *GetServerProcess() const { return this->server; } + + void SetServerProcess(KProcess *process) { + this->server = process; + this->server->Open(); + } + + constexpr void ClearThread() { this->thread = nullptr; } + constexpr void ClearEvent() { this->event = nullptr; } + + constexpr size_t GetSendCount() const { return this->mappings.GetSendCount(); } + constexpr size_t GetReceiveCount() const { return this->mappings.GetReceiveCount(); } + constexpr size_t GetExchangeCount() const { return this->mappings.GetExchangeCount(); } + + Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) { + return this->mappings.PushSend(client, server, size, state); + } + + Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) { + return this->mappings.PushReceive(client, server, size, state); + } + + Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) { + return this->mappings.PushExchange(client, server, size, state); + } + + constexpr KProcessAddress GetSendClientAddress(size_t i) const { return this->mappings.GetSendClientAddress(i); } + constexpr KProcessAddress GetSendServerAddress(size_t i) const { return this->mappings.GetSendServerAddress(i); } + constexpr size_t GetSendSize(size_t i) const { return this->mappings.GetSendSize(i); } + constexpr KMemoryState GetSendMemoryState(size_t i) const { return this->mappings.GetSendMemoryState(i); } + + constexpr KProcessAddress GetReceiveClientAddress(size_t i) const { return this->mappings.GetReceiveClientAddress(i); } + constexpr KProcessAddress GetReceiveServerAddress(size_t i) const { return this->mappings.GetReceiveServerAddress(i); } + constexpr size_t 
GetReceiveSize(size_t i) const { return this->mappings.GetReceiveSize(i); } + constexpr KMemoryState GetReceiveMemoryState(size_t i) const { return this->mappings.GetReceiveMemoryState(i); } + + constexpr KProcessAddress GetExchangeClientAddress(size_t i) const { return this->mappings.GetExchangeClientAddress(i); } + constexpr KProcessAddress GetExchangeServerAddress(size_t i) const { return this->mappings.GetExchangeServerAddress(i); } + constexpr size_t GetExchangeSize(size_t i) const { return this->mappings.GetExchangeSize(i); } + constexpr KMemoryState GetExchangeMemoryState(size_t i) const { return this->mappings.GetExchangeMemoryState(i); } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp index 796d264e3..82fb2432e 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp @@ -17,13 +17,43 @@ #include #include #include +#include namespace ams::kern { + class KProcess; + class KResourceLimit; + class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject); + private: + KPageGroup page_group; + KResourceLimit *resource_limit; + u64 owner_process_id; + ams::svc::MemoryPermission owner_perm; + ams::svc::MemoryPermission remote_perm; + bool is_initialized; public: - /* TODO: This is a placeholder definition. */ + explicit KSharedMemory() + : page_group(std::addressof(Kernel::GetBlockInfoManager())), resource_limit(nullptr), owner_process_id(std::numeric_limits::max()), + owner_perm(ams::svc::MemoryPermission_None), remote_perm(ams::svc::MemoryPermission_None), is_initialized(false) + { + /* ... */ + } + + virtual ~KSharedMemory() { /* ... */ } + + Result Initialize(KProcess *owner, size_t size, ams::svc::MemoryPermission own_perm, ams::svc::MemoryPermission rem_perm); + virtual void Finalize() override; + + virtual bool IsInitialized() const override { return this->is_initialized; } + static void PostDestroy(uintptr_t arg) { /* ... 
*/ } + + Result Map(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process, ams::svc::MemoryPermission map_perm); + Result Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process); + + u64 GetOwnerProcessId() const { return this->owner_process_id; } + size_t GetSize() const { return this->page_group.GetNumPages() * PageSize; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp index 2d24df367..494777700 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp @@ -17,6 +17,21 @@ #include #include +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arch::arm64::AllocateFromSlabAtomic; + using ams::kern::arch::arm64::FreeToSlabAtomic; + } + +#else + + #error "Unknown architecture for KSlabHeapImpl" + +#endif + + namespace ams::kern { namespace impl { @@ -29,8 +44,8 @@ namespace ams::kern { Node *next; }; private: - std::atomic head; - size_t obj_size; + Node * head; + size_t obj_size; public: constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { MESOSPHERE_ASSERT_THIS(); } @@ -50,15 +65,7 @@ namespace ams::kern { void *Allocate() { MESOSPHERE_ASSERT_THIS(); - Node *ret = this->head.load(); - - do { - if (AMS_UNLIKELY(ret == nullptr)) { - break; - } - } while (!this->head.compare_exchange_weak(ret, ret->next)); - - return ret; + return AllocateFromSlabAtomic(std::addressof(this->head)); } void Free(void *obj) { @@ -66,10 +73,7 @@ namespace ams::kern { Node *node = reinterpret_cast(obj); - Node *cur_head = this->head.load(); - do { - node->next = cur_head; - } while (!this->head.compare_exchange_weak(cur_head, node)); + return FreeToSlabAtomic(std::addressof(this->head), node); } }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp index 9f7934cd9..31532f9f4 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp @@ -42,8 +42,8 @@ namespace ams::kern { virtual bool IsSignaled() const = 0; virtual void DebugWaiters(); - iterator AddWaiterThread(KThread *thread); - iterator RemoveWaiterThread(iterator it); + iterator RegisterWaitingThread(KThread *thread); + iterator UnregisterWaitingThread(iterator it); iterator begin(); iterator end(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index 8e3ace54e..5ffb5b434 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -87,7 +87,7 @@ namespace ams::kern { u8 current_svc_id; bool is_calling_svc; bool is_in_exception_handler; - bool is_preemption_state_pinned; + bool is_pinned; s32 disable_count; KThreadContext *context; }; @@ -113,12 +113,29 @@ namespace ams::kern { private: static constexpr size_t PriorityInheritanceCountMax = 10; union SyncObjectBuffer { - KSynchronizationObject *sync_objects[ams::svc::MaxWaitSynchronizationHandleCount]; - ams::svc::Handle handles[ams::svc::MaxWaitSynchronizationHandleCount * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))]; + KSynchronizationObject *sync_objects[ams::svc::ArgumentHandleCountMax]; + 
ams::svc::Handle handles[ams::svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))]; constexpr SyncObjectBuffer() : sync_objects() { /* ... */ } }; static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); + + struct ConditionVariableComparator { + static constexpr ALWAYS_INLINE int Compare(const KThread &lhs, const KThread &rhs) { + const uintptr_t l_key = lhs.GetConditionVariableKey(); + const uintptr_t r_key = rhs.GetConditionVariableKey(); + + if (l_key < r_key) { + /* Sort first by key */ + return -1; + } else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) { + /* And then by priority. */ + return -1; + } else { + return 1; + } + } + }; private: static inline std::atomic s_next_thread_id = 0; private: @@ -130,7 +147,7 @@ namespace ams::kern { KLightLock *waiting_lock{}; uintptr_t condvar_key{}; uintptr_t entrypoint{}; - KProcessAddress arbiter_key{}; + KProcessAddress address_key{}; KProcess *parent{}; void *kernel_stack_top{}; u32 *light_ipc_data{}; @@ -150,12 +167,15 @@ namespace ams::kern { using WaiterListTraits = util::IntrusiveListMemberTraitsDeferredAssert<&KThread::waiter_list_node>; using WaiterList = WaiterListTraits::ListType; + using ConditionVariableThreadTreeTraits = util::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&KThread::condvar_arbiter_tree_node>; + using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType; + WaiterList waiter_list{}; - WaiterList paused_waiter_list{}; + WaiterList pinned_waiter_list{}; KThread *lock_owner{}; - KConditionVariable *cond_var{}; + ConditionVariableThreadTree *condvar_tree{}; uintptr_t debug_params[3]{}; - u32 arbiter_value{}; + u32 address_key_value{}; u32 suspend_request_flags{}; u32 suspend_allowed_flags{}; Result wait_result; @@ -183,7 +203,6 @@ namespace ams::kern { constexpr KThread() : wait_result(svc::ResultNoSynchronizationObject()), debug_exception_result(ResultSuccess()) { /* ... */ } virtual ~KThread() { /* ... */ } - /* TODO: Is a constexpr KThread() possible? 
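ConditionVariableComparator above orders waiters first by their condition-variable key and then by priority, so walking the red-black tree yields, for each key, its highest-priority waiter first. A standalone restatement with a simplified waiter type:

#include <cstdint>

struct Waiter {
    std::uintptr_t key;
    std::int32_t   priority;  /* lower value = higher priority */
};

constexpr int Compare(const Waiter &lhs, const Waiter &rhs) {
    if (lhs.key < rhs.key) {
        return -1;  /* Sort first by key. */
    } else if (lhs.key == rhs.key && lhs.priority < rhs.priority) {
        return -1;  /* Then by priority within a key. */
    } else {
        return 1;
    }
}

static_assert(Compare({ .key = 1, .priority = 5 }, { .key = 2, .priority = 0 }) < 0);
static_assert(Compare({ .key = 1, .priority = 2 }, { .key = 1, .priority = 7 }) < 0);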
*/ Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type); @@ -211,6 +230,10 @@ namespace ams::kern { const StackParameters &GetStackParameters() const { return *(reinterpret_cast(this->kernel_stack_top) - 1); } + public: + StackParameters &GetStackParametersForExceptionSvcPermission() { + return *(reinterpret_cast(this->kernel_stack_top) - 1); + } public: ALWAYS_INLINE s32 GetDisableDispatchCount() const { MESOSPHERE_ASSERT_THIS(); @@ -229,6 +252,21 @@ namespace ams::kern { this->GetStackParameters().disable_count--; } + void Pin(); + void Unpin(); + + ALWAYS_INLINE void SaveDebugParams(uintptr_t param1, uintptr_t param2, uintptr_t param3) { + this->debug_params[0] = param1; + this->debug_params[1] = param2; + this->debug_params[2] = param3; + } + + ALWAYS_INLINE void RestoreDebugParams(uintptr_t *param1, uintptr_t *param2, uintptr_t *param3) { + *param1 = this->debug_params[0]; + *param2 = this->debug_params[1]; + *param3 = this->debug_params[2]; + } + NOINLINE void DisableCoreMigration(); NOINLINE void EnableCoreMigration(); @@ -247,6 +285,16 @@ namespace ams::kern { return this->GetStackParameters().is_in_exception_handler; } + ALWAYS_INLINE bool IsCallingSvc() const { + MESOSPHERE_ASSERT_THIS(); + return this->GetStackParameters().is_calling_svc; + } + + ALWAYS_INLINE u8 GetSvcId() const { + MESOSPHERE_ASSERT_THIS(); + return this->GetStackParameters().current_svc_id; + } + ALWAYS_INLINE void RegisterDpc(DpcFlag flag) { this->GetStackParameters().dpc_flags |= flag; } @@ -261,19 +309,26 @@ namespace ams::kern { ALWAYS_INLINE bool HasDpc() const { MESOSPHERE_ASSERT_THIS(); - return this->GetDpc() != 0;; + return this->GetDpc() != 0; } private: void Suspend(); ALWAYS_INLINE void AddWaiterImpl(KThread *thread); ALWAYS_INLINE void RemoveWaiterImpl(KThread *thread); ALWAYS_INLINE static void RestorePriority(KThread *thread); + + void StartTermination(); + void FinishTermination(); public: constexpr u64 GetThreadId() const { return this->thread_id; } constexpr KThreadContext &GetContext() { return this->thread_context; } constexpr const KThreadContext &GetContext() const { return this->thread_context; } + constexpr const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; } + Result GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask); + Result SetCoreMask(int32_t ideal_core, u64 affinity_mask); + constexpr ThreadState GetState() const { return static_cast(this->thread_state & ThreadState_Mask); } constexpr ThreadState GetRawState() const { return this->thread_state; } NOINLINE void SetState(ThreadState state); @@ -281,12 +336,53 @@ namespace ams::kern { NOINLINE KThreadContext *GetContextForSchedulerLoop(); constexpr uintptr_t GetConditionVariableKey() const { return this->condvar_key; } + constexpr uintptr_t GetAddressArbiterKey() const { return this->condvar_key; } + + constexpr void SetupForConditionVariableCompare(uintptr_t cv_key, int priority) { + this->condvar_key = cv_key; + this->priority = priority; + } + + constexpr void SetConditionVariable(ConditionVariableThreadTree *tree, KProcessAddress address, uintptr_t cv_key, u32 value) { + this->condvar_tree = tree; + this->condvar_key = cv_key; + this->address_key = address; + this->address_key_value = value; + } + + constexpr void ClearConditionVariable() { + this->condvar_tree = nullptr; + } + + constexpr bool IsWaitingForConditionVariable() const { + return this->condvar_tree != nullptr; + } + + 
constexpr void SetupForAddressArbiterCompare(uintptr_t address, int priority) { + this->condvar_key = address; + this->priority = priority; + } + + constexpr void SetAddressArbiter(ConditionVariableThreadTree *tree, uintptr_t address) { + this->condvar_tree = tree; + this->condvar_key = address; + } + + constexpr void ClearAddressArbiter() { + this->condvar_tree = nullptr; + } + + constexpr bool IsWaitingForAddressArbiter() const { + return this->condvar_tree != nullptr; + } constexpr s32 GetIdealCore() const { return this->ideal_core_id; } constexpr s32 GetActiveCore() const { return this->core_id; } constexpr void SetActiveCore(s32 core) { this->core_id = core; } + constexpr s32 GetPriority() const { return this->priority; } constexpr void SetPriority(s32 prio) { this->priority = prio; } + constexpr s32 GetBasePriority() const { return this->base_priority; } constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return this->per_core_priority_queue_entry[core]; } @@ -296,7 +392,7 @@ namespace ams::kern { constexpr const QueueEntry &GetSleepingQueueEntry() const { return this->sleeping_queue_entry; } constexpr void SetSleepingQueue(KThreadQueue *q) { this->sleeping_queue = q; } - constexpr KConditionVariable *GetConditionVariable() const { return this->cond_var; } + constexpr ConditionVariableThreadTree *GetConditionVariableTree() const { return this->condvar_tree; } constexpr s32 GetNumKernelWaiters() const { return this->num_kernel_waiters; } @@ -304,41 +400,101 @@ namespace ams::kern { void RemoveWaiter(KThread *thread); KThread *RemoveWaiterByKey(s32 *out_num_waiters, KProcessAddress key); - constexpr KProcessAddress GetAddressKey() const { return this->arbiter_key; } - constexpr void SetAddressKey(KProcessAddress key) { this->arbiter_key = key; } + constexpr KProcessAddress GetAddressKey() const { return this->address_key; } + constexpr u32 GetAddressKeyValue() const { return this->address_key_value; } + constexpr void SetAddressKey(KProcessAddress key) { this->address_key = key; } + constexpr void SetAddressKey(KProcessAddress key, u32 val) { this->address_key = key; this->address_key_value = val; } + constexpr void SetLockOwner(KThread *owner) { this->lock_owner = owner; } constexpr KThread *GetLockOwner() const { return this->lock_owner; } constexpr void SetSyncedObject(KSynchronizationObject *obj, Result wait_res) { + MESOSPHERE_ASSERT_THIS(); + this->synced_object = obj; this->wait_result = wait_res; } + constexpr Result GetWaitResult(KSynchronizationObject **out) const { + MESOSPHERE_ASSERT_THIS(); + + *out = this->synced_object; + return this->wait_result; + } + + constexpr void SetDebugExceptionResult(Result result) { + MESOSPHERE_ASSERT_THIS(); + this->debug_exception_result = result; + } + + constexpr Result GetDebugExceptionResult() const { + MESOSPHERE_ASSERT_THIS(); + return this->debug_exception_result; + } + + void WaitCancel(); + + bool IsWaitCancelled() const { return this->wait_cancelled; } + void ClearWaitCancelled() { this->wait_cancelled = false; } + + void ClearCancellable() { this->cancellable = false; } + void SetCancellable() { this->cancellable = true; } + + constexpr u32 *GetLightSessionData() const { return this->light_ipc_data; } + constexpr void SetLightSessionData(u32 *data) { this->light_ipc_data = data; } + bool HasWaiters() const { return !this->waiter_list.empty(); } constexpr s64 GetLastScheduledTick() const { return this->last_scheduled_tick; } constexpr void SetLastScheduledTick(s64 tick) { this->last_scheduled_tick = tick; } + constexpr s64 
GetYieldScheduleCount() const { return this->schedule_count; } + constexpr void SetYieldScheduleCount(s64 count) { this->schedule_count = count; } + constexpr KProcess *GetOwnerProcess() const { return this->parent; } constexpr bool IsUserThread() const { return this->parent != nullptr; } + constexpr uintptr_t GetEntrypoint() const { return this->entrypoint; } + constexpr KProcessAddress GetThreadLocalRegionAddress() const { return this->tls_address; } constexpr void *GetThreadLocalRegionHeapAddress() const { return this->tls_heap_address; } - constexpr u16 GetUserPreemptionState() const { return *GetPointer(this->tls_address + 0x100); } - constexpr void SetKernelPreemptionState(u16 state) const { *GetPointer(this->tls_address + 0x100 + sizeof(u16)) = state; } + constexpr KSynchronizationObject **GetSynchronizationObjectBuffer() { return std::addressof(this->sync_object_buffer.sync_objects[0]); } + constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(this->sync_object_buffer.handles[sizeof(this->sync_object_buffer.sync_objects) / sizeof(ams::svc::Handle) - ams::svc::ArgumentHandleCountMax]); } - void AddCpuTime(s64 amount) { + u16 GetUserDisableCount() const { return static_cast(this->tls_heap_address)->disable_count; } + void SetInterruptFlag() const { static_cast(this->tls_heap_address)->interrupt_flag = 1; } + void ClearInterruptFlag() const { static_cast(this->tls_heap_address)->interrupt_flag = 0; } + + constexpr void SetDebugAttached() { this->debug_attached = true; } + constexpr bool IsAttachedToDebugger() const { return this->debug_attached; } + + void AddCpuTime(s32 core_id, s64 amount) { this->cpu_time += amount; + /* TODO: Debug kernels track per-core tick counts. Should we? */ + } + + s64 GetCpuTime() const { return this->cpu_time; } + + s64 GetCpuTime(s32 core_id) const { + MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast(cpu::NumCores)); + + /* TODO: Debug kernels track per-core tick counts. Should we? */ + return 0; } constexpr u32 GetSuspendFlags() const { return this->suspend_allowed_flags & this->suspend_request_flags; } constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; } + constexpr bool IsSuspendRequested(SuspendType type) const { return (this->suspend_request_flags & (1u << (ThreadState_SuspendShift + type))) != 0; } + constexpr bool IsSuspendRequested() const { return this->suspend_request_flags != 0; } void RequestSuspend(SuspendType type); void Resume(SuspendType type); void TrySuspend(); void Continue(); + Result SetActivity(ams::svc::ThreadActivity activity); + Result GetThreadContext3(ams::svc::ThreadContext *out); + void ContinueIfHasKernelWaiters() { if (this->GetNumKernelWaiters() > 0) { this->Continue(); @@ -347,16 +503,20 @@ namespace ams::kern { void Wakeup(); + void SetBasePriority(s32 priority); Result SetPriorityToIdle(); Result Run(); void Exit(); + void Terminate(); + ThreadState RequestTerminate(); + + Result Sleep(s64 timeout); + ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast(this->kernel_stack_top) - 1; } ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; } - /* TODO: This is kind of a placeholder definition. 
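The GetSynchronizationObjectBuffer()/GetHandleBuffer() accessors above hand out two views of one per-thread buffer: synchronization-object pointers grow from the front, while incoming handles are parked in the tail of the handle-typed view, so both fit without a second allocation. A minimal sketch of that layout, assuming the backing storage is a union of the two arrays (Object, Handle, and MaxHandles are hypothetical stand-ins):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

using Handle = uint32_t;
struct Object;                      /* Stand-in for KSynchronizationObject. */
constexpr size_t MaxHandles = 0x40; /* Stand-in for ams::svc::ArgumentHandleCountMax. */

/* One backing buffer, two views: object pointers from the front, incoming
   handles packed into the tail of the same storage. */
union SyncBuffer {
    Object *objects[MaxHandles];
    Handle  handles[MaxHandles * (sizeof(Object *) / sizeof(Handle))];
};
static_assert(sizeof(SyncBuffer::objects) == sizeof(SyncBuffer::handles));

Handle *GetHandleSlots(SyncBuffer &buf) {
    /* Last MaxHandles entries of the handle view, mirroring GetHandleBuffer(). */
    return &buf.handles[sizeof(buf.objects) / sizeof(Handle) - MaxHandles];
}

int main() {
    SyncBuffer buf;
    std::printf("handle slots start at byte offset %zu of %zu\n",
                static_cast<size_t>(reinterpret_cast<uintptr_t>(GetHandleSlots(buf)) - reinterpret_cast<uintptr_t>(&buf)),
                sizeof(buf));
    return 0;
}
```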
*/ - ALWAYS_INLINE bool IsTerminationRequested() const { return this->termination_requested || this->GetRawState() == ThreadState_Terminated; } @@ -378,9 +538,19 @@ namespace ams::kern { static constexpr bool IsWaiterListValid() { return WaiterListTraits::IsValid(); } + + static constexpr bool IsConditionVariableThreadTreeValid() { + return ConditionVariableThreadTreeTraits::IsValid(); + } + + static KThread *GetThreadFromId(u64 thread_id); + static Result GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer out_thread_ids, s32 max_out_count); + + using ConditionVariableThreadTreeType = ConditionVariableThreadTree; }; static_assert(alignof(KThread) == 0x10); static_assert(KThread::IsWaiterListValid()); + static_assert(KThread::IsConditionVariableThreadTreeValid()); class KScopedDisableDispatch { public: @@ -404,4 +574,12 @@ namespace ams::kern { } }; + ALWAYS_INLINE KExceptionContext *GetExceptionContext(KThread *thread) { + return reinterpret_cast(reinterpret_cast(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext)); + } + + ALWAYS_INLINE const KExceptionContext *GetExceptionContext(const KThread *thread) { + return reinterpret_cast(reinterpret_cast(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext)); + } + } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp index 25df4ae70..d8ff08ef3 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp @@ -20,6 +20,8 @@ namespace ams::kern { using ams::kern::arch::arm64::KThreadContext; + + using ams::kern::arch::arm64::GetUserContext; } #else #error "Unknown architecture for KThreadContext" diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp index 9dd0d98bc..a0000202a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_transfer_memory.hpp @@ -22,8 +22,34 @@ namespace ams::kern { class KTransferMemory final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KTransferMemory, KAutoObject); + private: + TYPED_STORAGE(KPageGroup) page_group; + KProcess *owner; + KProcessAddress address; + KLightLock lock; + ams::svc::MemoryPermission owner_perm; + bool is_initialized; + bool is_mapped; public: - /* TODO: This is a placeholder definition. */ + explicit KTransferMemory() : owner(nullptr), address(Null), owner_perm(ams::svc::MemoryPermission_None), is_initialized(false), is_mapped(false) { + /* ... */ + } + + virtual ~KTransferMemory() { /* ... */ } + + Result Initialize(KProcessAddress addr, size_t size, ams::svc::MemoryPermission own_perm); + virtual void Finalize() override; + + virtual bool IsInitialized() const override { return this->is_initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(this->owner); } + static void PostDestroy(uintptr_t arg); + + Result Map(KProcessAddress address, size_t size, ams::svc::MemoryPermission map_perm); + Result Unmap(KProcessAddress address, size_t size); + + KProcess *GetOwner() const { return this->owner; } + KProcessAddress GetSourceAddress() { return this->address; } + size_t GetSize() const { return this->is_initialized ? 
GetReference(this->page_group).GetNumPages() * PageSize : 0; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp index d078ff484..bc291e0bd 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp @@ -124,8 +124,6 @@ namespace ams::kern { return this->address != rhs; } - /* TODO: <, <=, >, >= against uintptr_t? would need to be declared outside of class. Maybe worth it. */ - /* Allow getting the address explicitly, for use in accessors. */ constexpr ALWAYS_INLINE uintptr_t GetValue() const { return this->address; @@ -244,6 +242,5 @@ namespace ams::kern { /* Accessors. */ static_assert(15 == GetInteger(KPhysicalAddress(15))); static_assert(0 == GetInteger(Null)); - /* TODO: reinterpret_cast<> not valid in a constant expression, can't test get pointers. */ } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp index ac287c126..80a0ed78c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp @@ -25,13 +25,42 @@ namespace ams::kern { using Entry = KThread::QueueEntry; private: Entry root; - bool uses_timer; + bool timer_used; public: - constexpr KWaitObject() : root(), uses_timer() { /* ... */ } + constexpr KWaitObject() : root(), timer_used() { /* ... */ } virtual void OnTimer() override; + Result Synchronize(s64 timeout); + private: + constexpr ALWAYS_INLINE void Enqueue(KThread *add) { + /* Get the entry associated with the added thread. */ + Entry &add_entry = add->GetSleepingQueueEntry(); - /* TODO: Member functions */ + /* Get the entry associated with the end of the queue. */ + KThread *tail = this->root.GetPrev(); + Entry &tail_entry = (tail != nullptr) ? tail->GetSleepingQueueEntry() : this->root; + + /* Link the entries. */ + add_entry.SetPrev(tail); + add_entry.SetNext(nullptr); + tail_entry.SetNext(add); + this->root.SetPrev(add); + } + + constexpr ALWAYS_INLINE void Remove(KThread *remove) { + /* Get the entry associated with the thread. */ + Entry &remove_entry = remove->GetSleepingQueueEntry(); + + /* Get the entries associated with next and prev. */ + KThread *prev = remove_entry.GetPrev(); + KThread *next = remove_entry.GetNext(); + Entry &prev_entry = (prev != nullptr) ? prev->GetSleepingQueueEntry() : this->root; + Entry &next_entry = (next != nullptr) ? next->GetSleepingQueueEntry() : this->root; + + /* Unlink. */ + prev_entry.SetNext(next); + next_entry.SetPrev(prev); + } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_writable_event.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_writable_event.hpp new file mode 100644 index 000000000..f67c43979 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_writable_event.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
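KWaitObject's Enqueue()/Remove() above thread waiters onto an intrusive doubly-linked queue whose root entry doubles as the sentinel at both ends, with a null neighbour pointer meaning "the root". A standalone sketch of the same linking logic, using hypothetical Thread/Entry/WaitQueue types:

```cpp
#include <cstdio>

struct Thread;

struct Entry {
    Thread *prev = nullptr;
    Thread *next = nullptr;
};

struct Thread {
    int id;
    Entry entry;
};

struct WaitQueue {
    Entry root; /* root.next = head, root.prev = tail; nullptr means "the root". */

    Entry &EntryOf(Thread *t) { return t ? t->entry : root; }

    void Enqueue(Thread *add) {
        Thread *tail = root.prev;
        add->entry.prev = tail;
        add->entry.next = nullptr;
        EntryOf(tail).next = add;   /* Either the previous tail, or the root when empty. */
        root.prev = add;
    }

    void Remove(Thread *remove) {
        Thread *prev = remove->entry.prev;
        Thread *next = remove->entry.next;
        EntryOf(prev).next = next;  /* Unlink, fixing up the root when at an end. */
        EntryOf(next).prev = prev;
    }
};

int main() {
    Thread a{1}, b{2}, c{3};
    WaitQueue q;
    q.Enqueue(&a); q.Enqueue(&b); q.Enqueue(&c);
    q.Remove(&b);
    for (Thread *t = q.root.next; t != nullptr; t = t->entry.next) {
        std::printf("%d ", t->id);
    }
    std::printf("\n"); /* Prints: 1 3 */
    return 0;
}
```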
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KEvent; + + class KWritableEvent final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KWritableEvent, KAutoObject); + private: + KEvent *parent; + public: + constexpr explicit KWritableEvent() : parent(nullptr) { /* ... */ } + virtual ~KWritableEvent() { /* ... */ } + + virtual void Destroy() override; + + static void PostDestroy(uintptr_t arg) { /* ... */ } + + void Initialize(KEvent *p); + Result Signal(); + Result Clear(); + + constexpr KEvent *GetParent() const { return this->parent; } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp index 4d4845240..eb98eaa9c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp @@ -94,6 +94,10 @@ namespace ams::kern { static KThread &GetMainThread(s32 core_id); static KThread &GetIdleThread(s32 core_id); + static ALWAYS_INLINE KCurrentContext &GetCurrentContext(s32 core_id) { + return GetCoreLocalContext(core_id).current; + } + static ALWAYS_INLINE KScheduler &GetScheduler() { return GetCoreLocalContext().scheduler; } @@ -111,7 +115,7 @@ namespace ams::kern { } static ALWAYS_INLINE KHardwareTimer &GetHardwareTimer() { - return GetCoreLocalContext().hardware_timer; + return GetCoreLocalContext(GetCurrentCoreId()).hardware_timer; } static ALWAYS_INLINE KResourceLimit &GetSystemResourceLimit() { diff --git a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp index 2f31453ae..3e7eaf72b 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp @@ -20,7 +20,7 @@ namespace ams::kern { template - ALWAYS_INLINE void UnusedImpl(ArgTypes... args) { + ALWAYS_INLINE void UnusedImpl(ArgTypes &&... 
args) { (static_cast(args), ...); } diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp index 7b4c2b93e..3a03d53a4 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp @@ -24,6 +24,8 @@ namespace ams::kern::svc { /* 33 */ using ::ams::svc::ResultNotImplemented; + /* 54 */ using ::ams::svc::ResultStopProcessingException; + /* 57 */ using ::ams::svc::ResultNoSynchronizationObject; /* 59 */ using ::ams::svc::ResultTerminationRequested; @@ -37,7 +39,7 @@ namespace ams::kern::svc { /* 105 */ using ::ams::svc::ResultOutOfHandles; /* 106 */ using ::ams::svc::ResultInvalidCurrentMemory; - /* 108 */ using ::ams::svc::ResultInvalidNewMemoryPermissions; + /* 108 */ using ::ams::svc::ResultInvalidNewMemoryPermission; /* 110 */ using ::ams::svc::ResultInvalidMemoryRegion; @@ -58,15 +60,19 @@ namespace ams::kern::svc { /* 126 */ using ::ams::svc::ResultReservedUsed; /* 127 */ using ::ams::svc::ResultNotSupported; /* 128 */ using ::ams::svc::ResultDebug; - /* 129 */ using ::ams::svc::ResultThreadNotOwned; - + /* 129 */ using ::ams::svc::ResultNoThread; + /* 130 */ using ::ams::svc::ResultUnknownThread; /* 131 */ using ::ams::svc::ResultPortClosed; /* 132 */ using ::ams::svc::ResultLimitReached; + /* 133 */ using ::ams::svc::ResultInvalidMemoryPool; /* 258 */ using ::ams::svc::ResultReceiveListBroken; /* 259 */ using ::ams::svc::ResultOutOfAddressSpace; /* 260 */ using ::ams::svc::ResultMessageTooLarge; + /* 517 */ using ::ams::svc::ResultInvalidProcessId; + /* 518 */ using ::ams::svc::ResultInvalidThreadId; + /* 519 */ using ::ams::svc::ResultInvalidId; /* 520 */ using ::ams::svc::ResultProcessTerminated; } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp index 60fd0473e..170ae14e0 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp @@ -60,7 +60,7 @@ namespace ams::kern::arch::arm64::cpu { void Wait() { while (!this->done) { - __asm__ __volatile__("yield"); + cpu::Yield(); } } @@ -173,7 +173,7 @@ namespace ams::kern::arch::arm64::cpu { Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask); this->ProcessOperation(); while (this->target_cores != 0) { - __asm__ __volatile__("yield"); + cpu::Yield(); } } else { /* Request all cores. 
*/ @@ -370,24 +370,24 @@ namespace ams::kern::arch::arm64::cpu { Result StoreDataCache(const void *addr, size_t size) { KScopedCoreMigrationDisable dm; - const uintptr_t start = util::AlignDown(reinterpret_cast(addr), DataCacheLineSize); - const uintptr_t end = util::AlignUp( reinterpret_cast(addr), DataCacheLineSize); + const uintptr_t start = util::AlignDown(reinterpret_cast(addr), DataCacheLineSize); + const uintptr_t end = util::AlignUp( reinterpret_cast(addr) + size, DataCacheLineSize); return StoreDataCacheRange(start, end); } Result FlushDataCache(const void *addr, size_t size) { KScopedCoreMigrationDisable dm; - const uintptr_t start = util::AlignDown(reinterpret_cast(addr), DataCacheLineSize); - const uintptr_t end = util::AlignUp( reinterpret_cast(addr), DataCacheLineSize); + const uintptr_t start = util::AlignDown(reinterpret_cast(addr), DataCacheLineSize); + const uintptr_t end = util::AlignUp( reinterpret_cast(addr) + size, DataCacheLineSize); return FlushDataCacheRange(start, end); } Result InvalidateInstructionCache(void *addr, size_t size) { KScopedCoreMigrationDisable dm; - const uintptr_t start = util::AlignDown(reinterpret_cast(addr), InstructionCacheLineSize); - const uintptr_t end = util::AlignUp( reinterpret_cast(addr), InstructionCacheLineSize); + const uintptr_t start = util::AlignDown(reinterpret_cast(addr), InstructionCacheLineSize); + const uintptr_t end = util::AlignUp( reinterpret_cast(addr) + size, InstructionCacheLineSize); R_TRY(InvalidateInstructionCacheRange(start, end)); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp index 37fefefb4..b728ca612 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp @@ -15,10 +15,52 @@ */ #include +namespace ams::kern::svc { + + void RestoreContext(uintptr_t sp); + +} + namespace ams::kern::arch::arm64 { namespace { + constexpr inline u32 El0PsrMask = 0xFF0FFE20; + + enum EsrEc : u32 { + EsrEc_Unknown = 0b000000, + EsrEc_WaitForInterruptOrEvent = 0b000001, + EsrEc_Cp15McrMrc = 0b000011, + EsrEc_Cp15McrrMrrc = 0b000100, + EsrEc_Cp14McrMrc = 0b000101, + EsrEc_FpAccess = 0b000111, + EsrEc_Cp14Mrrc = 0b001100, + EsrEc_BranchTarget = 0b001101, + EsrEc_IllegalExecution = 0b001110, + EsrEc_Svc32 = 0b010001, + EsrEc_Svc64 = 0b010101, + EsrEc_SystemInstruction64 = 0b011000, + EsrEc_SveZen = 0b011001, + EsrEc_PointerAuthInstruction = 0b011100, + EsrEc_InstructionAbortEl0 = 0b100000, + EsrEc_InstructionAbortEl1 = 0b100001, + EsrEc_PcAlignmentFault = 0b100010, + EsrEc_DataAbortEl0 = 0b100100, + EsrEc_DataAbortEl1 = 0b100101, + EsrEc_SpAlignmentFault = 0b100110, + EsrEc_FpException32 = 0b101000, + EsrEc_FpException64 = 0b101100, + EsrEc_SErrorInterrupt = 0b101111, + EsrEc_BreakPointEl0 = 0b110000, + EsrEc_BreakPointEl1 = 0b110001, + EsrEc_SoftwareStepEl0 = 0b110010, + EsrEc_SoftwareStepEl1 = 0b110011, + EsrEc_WatchPointEl0 = 0b110100, + EsrEc_WatchPointEl1 = 0b110101, + EsrEc_BkptInstruction = 0b111000, + EsrEc_BrkInstruction = 0b111100, + }; + constexpr u32 GetInstructionData(const KExceptionContext *context, u64 esr) { /* Check for THUMB usermode */ if ((context->psr & 0x3F) == 0x30) { @@ -35,47 +77,234 @@ namespace ams::kern::arch::arm64 { } void HandleUserException(KExceptionContext *context, u64 esr, u64 far, u64 afsr0, u64 afsr1, u32 data) { - KProcess *cur_process = GetCurrentProcessPointer(); + KProcess &cur_process = 
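The StoreDataCache/FlushDataCache/InvalidateInstructionCache hunk above fixes the end-of-range computation: the old code aligned `addr` up and ignored `size`, so only the first cache line of a buffer was maintained. A small worked example of the two computations (the 64-byte line size is illustrative; the kernel derives the real value from the CPU's cache type register):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr uintptr_t AlignDown(uintptr_t v, uintptr_t a) { return v & ~(a - 1); }
constexpr uintptr_t AlignUp(uintptr_t v, uintptr_t a)   { return AlignDown(v + a - 1, a); }

int main() {
    constexpr uintptr_t DataCacheLineSize = 64; /* Illustrative only. */
    const uintptr_t addr = 0x10000030;
    const size_t    size = 0x100;

    const uintptr_t start      = AlignDown(addr, DataCacheLineSize);        /* 0x10000000 */
    /* Before the fix: the end ignored `size`, covering only the first line. */
    const uintptr_t broken_end = AlignUp(addr, DataCacheLineSize);          /* 0x10000040 */
    /* After the fix: cover every line touched by [addr, addr + size). */
    const uintptr_t fixed_end  = AlignUp(addr + size, DataCacheLineSize);   /* 0x10000140 */

    std::printf("broken: %#zx..%#zx, fixed: %#zx..%#zx\n",
                static_cast<size_t>(start), static_cast<size_t>(broken_end),
                static_cast<size_t>(start), static_cast<size_t>(fixed_end));
    return 0;
}
```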
GetCurrentProcess(); bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled(); const u64 ec = (esr >> 26) & 0x3F; switch (ec) { - case 0x0: /* Unknown */ - case 0xE: /* Illegal Execution State */ - case 0x11: /* SVC instruction from Aarch32 */ - case 0x15: /* SVC instruction from Aarch64 */ - case 0x22: /* PC Misalignment */ - case 0x26: /* SP Misalignment */ - case 0x2F: /* SError */ - case 0x30: /* Breakpoint from lower EL */ - case 0x32: /* SoftwareStep from lower EL */ - case 0x34: /* Watchpoint from lower EL */ - case 0x38: /* BKPT instruction */ - case 0x3C: /* BRK instruction */ + case EsrEc_Unknown: + case EsrEc_IllegalExecution: + case EsrEc_Svc32: + case EsrEc_Svc64: + case EsrEc_PcAlignmentFault: + case EsrEc_SpAlignmentFault: + case EsrEc_SErrorInterrupt: + case EsrEc_BreakPointEl0: + case EsrEc_SoftwareStepEl0: + case EsrEc_WatchPointEl0: + case EsrEc_BkptInstruction: + case EsrEc_BrkInstruction: break; default: { - MESOSPHERE_TODO("Get memory state."); - /* If state is KMemoryState_Code and the user can't read it, set should_process_user_exception = true; */ + /* If the fault address's state is KMemoryState_Code and the user can't read the address, force processing exception. */ + KMemoryInfo info; + ams::svc::PageInfo pi; + if (R_SUCCEEDED(cur_process.GetPageTable().QueryInfo(std::addressof(info), std::addressof(pi), far))) { + if (info.GetState() == KMemoryState_Code && ((info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead)) { + should_process_user_exception = true; + } + } } break; } - if (should_process_user_exception) { - MESOSPHERE_TODO("Process the user exception."); + /* If we should process the user exception (and it's not a breakpoint), try to enter. */ + const bool is_software_break = (ec == EsrEc_Unknown || ec == EsrEc_IllegalExecution || ec == EsrEc_BkptInstruction || ec == EsrEc_BrkInstruction); + const bool is_breakpoint = (ec == EsrEc_BreakPointEl0 || ec == EsrEc_SoftwareStepEl0 || ec == EsrEc_WatchPointEl0); + if ((should_process_user_exception) && + !(is_software_break && cur_process.IsAttachedToDebugger() && KDebug::IsBreakInstruction(data, context->psr)) && + !(is_breakpoint)) + { + if (cur_process.EnterUserException()) { + /* Fill out the exception info. */ + const bool is_aarch64 = (context->psr & 0x10) == 0; + if (is_aarch64) { + /* 64-bit. */ + ams::svc::aarch64::ExceptionInfo *info = std::addressof(GetPointer(cur_process.GetProcessLocalRegionAddress())->exception_info); + + for (size_t i = 0; i < util::size(info->r); ++i) { + info->r[i] = context->x[i]; + } + info->sp = context->sp; + info->lr = context->x[30]; + info->pc = context->pc; + info->pstate = (context->psr & El0PsrMask); + info->afsr0 = afsr0; + info->afsr1 = afsr1; + info->esr = esr; + info->far = far; + } else { + /* 32-bit. */ + ams::svc::aarch32::ExceptionInfo *info = std::addressof(GetPointer(cur_process.GetProcessLocalRegionAddress())->exception_info); + + for (size_t i = 0; i < util::size(info->r); ++i) { + info->r[i] = context->x[i]; + } + info->sp = context->x[13]; + info->lr = context->x[14]; + info->pc = context->pc; + info->flags = 1; + + info->status_64.pstate = (context->psr & El0PsrMask); + info->status_64.afsr0 = afsr0; + info->status_64.afsr1 = afsr1; + info->status_64.esr = esr; + info->status_64.far = far; + } + + /* Save the debug parameters to the current thread. */ + GetCurrentThread().SaveDebugParams(far, esr, data); + + /* Get the exception type. 
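HandleUserException() above now dispatches on the named EsrEc values rather than raw hex constants; the exception class lives in bits [31:26] of ESR_EL1 and the instruction-specific syndrome in bits [24:0]. A minimal sketch of extracting those fields (the SVC immediate used here is a made-up example value):

```cpp
#include <cstdint>
#include <cstdio>

/* ESR_ELx layout: EC in bits [31:26], ISS in bits [24:0]; values as in the EsrEc enum above. */
enum EsrEc : uint32_t {
    EsrEc_Svc64          = 0b010101,
    EsrEc_DataAbortEl0   = 0b100100,
    EsrEc_BrkInstruction = 0b111100,
};

constexpr uint32_t GetEc(uint64_t esr)  { return static_cast<uint32_t>((esr >> 26) & 0x3F); }
constexpr uint32_t GetIss(uint64_t esr) { return static_cast<uint32_t>(esr & 0x1FFFFFF);    }

int main() {
    /* A hypothetical ESR for an SVC #0x24 taken from AArch64 (bit 25 is the IL bit). */
    const uint64_t esr = (static_cast<uint64_t>(EsrEc_Svc64) << 26) | (1u << 25) | 0x24;

    std::printf("ec=%#x iss=%#x svc-immediate=%#x\n", GetEc(esr), GetIss(esr), GetIss(esr) & 0xFFFF);
    return 0;
}
```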
*/ + u32 type; + switch (ec) { + case EsrEc_Unknown: + case EsrEc_IllegalExecution: + case EsrEc_Cp15McrMrc: + case EsrEc_Cp15McrrMrrc: + case EsrEc_Cp14McrMrc: + case EsrEc_Cp14Mrrc: + case EsrEc_SystemInstruction64: + case EsrEc_BkptInstruction: + case EsrEc_BrkInstruction: + type = ams::svc::ExceptionType_InstructionAbort; + break; + case EsrEc_PcAlignmentFault: + type = ams::svc::ExceptionType_UnalignedInstruction; + break; + case EsrEc_SpAlignmentFault: + type = ams::svc::ExceptionType_UnalignedData; + break; + case EsrEc_Svc32: + case EsrEc_Svc64: + type = ams::svc::ExceptionType_InvalidSystemCall; + break; + case EsrEc_SErrorInterrupt: + type = ams::svc::ExceptionType_MemorySystemError; + break; + case EsrEc_InstructionAbortEl0: + type = ams::svc::ExceptionType_InstructionAbort; + break; + case EsrEc_DataAbortEl0: + default: + type = ams::svc::ExceptionType_DataAbort; + break; + } + + /* We want to enter at the process entrypoint, with x0 = type. */ + context->pc = GetInteger(cur_process.GetEntryPoint()); + context->x[0] = type; + if (is_aarch64) { + context->x[1] = GetInteger(cur_process.GetProcessLocalRegionAddress() + __builtin_offsetof(ams::svc::aarch64::ProcessLocalRegion, exception_info)); + + auto *plr = GetPointer(cur_process.GetProcessLocalRegionAddress()); + context->sp = util::AlignDown(reinterpret_cast(plr->data) + sizeof(plr->data), 0x10); + context->psr = 0; + } else { + context->x[1] = GetInteger(cur_process.GetProcessLocalRegionAddress() + __builtin_offsetof(ams::svc::aarch32::ProcessLocalRegion, exception_info)); + + auto *plr = GetPointer(cur_process.GetProcessLocalRegionAddress()); + context->x[13] = util::AlignDown(reinterpret_cast(plr->data) + sizeof(plr->data), 0x10); + context->psr = 0x10; + } + + /* Set exception SVC permissions. */ + cur_process.CopyEnterExceptionSvcPermissionsTo(GetCurrentThread().GetStackParametersForExceptionSvcPermission()); + return; + } } { - MESOSPHERE_TODO("Process for KDebug."); + /* Collect additional information based on the ec. */ + ams::svc::DebugException exception; + uintptr_t param2 = 0; + uintptr_t param3 = 0; + switch (ec) { + case EsrEc_Unknown: + case EsrEc_IllegalExecution: + case EsrEc_BkptInstruction: + case EsrEc_BrkInstruction: + { + exception = ams::svc::DebugException_UndefinedInstruction; + param2 = far; + param3 = data; + } + break; + case EsrEc_PcAlignmentFault: + case EsrEc_SpAlignmentFault: + { + exception = ams::svc::DebugException_AlignmentFault; + param2 = far; + } + break; + case EsrEc_Svc32: + case EsrEc_Svc64: + { + exception = ams::svc::DebugException_UndefinedSystemCall; + param2 = far; + param3 = (esr & 0xFF); + } + break; + case EsrEc_BreakPointEl0: + case EsrEc_SoftwareStepEl0: + { + exception = ams::svc::DebugException_BreakPoint; + param2 = far; + param3 = ams::svc::BreakPointType_HardwareInstruction; + } + break; + case EsrEc_WatchPointEl0: + { + exception = ams::svc::DebugException_BreakPoint; + param2 = far; + param3 = ams::svc::BreakPointType_HardwareInstruction; + } + break; + case EsrEc_SErrorInterrupt: + { + exception = ams::svc::DebugException_MemorySystemError; + param2 = far; + } + break; + case EsrEc_InstructionAbortEl0: + { + exception = ams::svc::DebugException_InstructionAbort; + param2 = far; + } + break; + case EsrEc_DataAbortEl0: + default: + { + exception = ams::svc::DebugException_DataAbort; + param2 = far; + } + break; + } - MESOSPHERE_TODO("cur_process->GetProgramId()"); - MESOSPHERE_RELEASE_LOG("Exception occurred. %016lx\n", 0ul); + /* Process the debug event. 
*/ + Result result = KDebug::OnDebugEvent(ams::svc::DebugEvent_Exception, exception, param2, param3); - MESOSPHERE_TODO("if (!svc::ResultNotHandled::Includes(res)) { debug process }."); + /* If we should stop processing the exception, do so. */ + if (svc::ResultStopProcessingException::Includes(result)) { + return; + } + + /* Print that an exception occurred. */ + MESOSPHERE_RELEASE_LOG("Exception occurred. %016lx\n", GetCurrentProcess().GetProgramId()); + + + /* If the SVC is handled, handle it. */ + if (!svc::ResultNotHandled::Includes(result)) { + /* If we successfully enter jit debug, stop processing the exception. */ + if (cur_process.EnterJitDebug(ams::svc::DebugEvent_Exception, exception, param2, param3)) { + return; + } + } } - MESOSPHERE_TODO("cur_process->Exit();"); - (void)cur_process; + /* Exit the current process. */ + cur_process.Exit(); } } @@ -85,6 +314,149 @@ namespace ams::kern::arch::arm64 { KThreadContext::FpuContextSwitchHandler(GetCurrentThreadPointer()); } + /* NOTE: This function is called from ASM. */ + void ReturnFromException(Result user_result) { + /* Get the current thread. */ + KThread *cur_thread = GetCurrentThreadPointer(); + + /* Get the current exception context. */ + KExceptionContext *e_ctx = GetExceptionContext(cur_thread); + + /* Get the current process. */ + KProcess &cur_process = GetCurrentProcess(); + + /* Read the exception info that userland put in tls. */ + union { + ams::svc::aarch64::ExceptionInfo info64; + ams::svc::aarch32::ExceptionInfo info32; + } info = {}; + + + const bool is_aarch64 = (e_ctx->psr & 0x10) == 0; + if (is_aarch64) { + /* We're 64-bit. */ + info.info64 = GetPointer(cur_process.GetProcessLocalRegionAddress())->exception_info; + } else { + /* We're 32-bit. */ + info.info32 = GetPointer(cur_process.GetProcessLocalRegionAddress())->exception_info; + } + + /* Try to leave the user exception. */ + if (cur_process.LeaveUserException()) { + /* We left user exception. Alter our SVC permissions accordingly. */ + cur_process.CopyLeaveExceptionSvcPermissionsTo(cur_thread->GetStackParametersForExceptionSvcPermission()); + + /* Copy the user context to the thread context. */ + if (is_aarch64) { + for (size_t i = 0; i < util::size(info.info64.r); ++i) { + e_ctx->x[i] = info.info64.r[i]; + } + e_ctx->x[30] = info.info64.lr; + e_ctx->sp = info.info64.sp; + e_ctx->pc = info.info64.pc; + e_ctx->psr = (info.info64.pstate & El0PsrMask) | (e_ctx->psr & ~El0PsrMask); + } else { + for (size_t i = 0; i < util::size(info.info32.r); ++i) { + e_ctx->x[i] = info.info32.r[i]; + } + e_ctx->x[14] = info.info32.lr; + e_ctx->x[13] = info.info32.sp; + e_ctx->pc = info.info32.pc; + e_ctx->psr = (info.info32.status_64.pstate & El0PsrMask) | (e_ctx->psr & ~El0PsrMask); + } + + /* Note that PC was adjusted. */ + e_ctx->write = 1; + + if (R_SUCCEEDED(user_result)) { + /* If result handling succeeded, just restore the context. */ + svc::RestoreContext(reinterpret_cast(e_ctx)); + } else { + /* Restore the debug params for the exception. */ + uintptr_t far, esr, data; + GetCurrentThread().RestoreDebugParams(std::addressof(far), std::addressof(esr), std::addressof(data)); + + /* Collect additional information based on the ec. 
*/ + ams::svc::DebugException exception; + uintptr_t param2 = 0; + uintptr_t param3 = 0; + switch ((esr >> 26) & 0x3F) { + case EsrEc_Unknown: + case EsrEc_IllegalExecution: + case EsrEc_BkptInstruction: + case EsrEc_BrkInstruction: + { + exception = ams::svc::DebugException_UndefinedInstruction; + param2 = far; + param3 = data; + } + break; + case EsrEc_PcAlignmentFault: + case EsrEc_SpAlignmentFault: + { + exception = ams::svc::DebugException_AlignmentFault; + param2 = far; + } + break; + case EsrEc_Svc32: + case EsrEc_Svc64: + { + exception = ams::svc::DebugException_UndefinedSystemCall; + param2 = far; + param3 = (esr & 0xFF); + } + break; + case EsrEc_SErrorInterrupt: + { + exception = ams::svc::DebugException_MemorySystemError; + param2 = far; + } + break; + case EsrEc_InstructionAbortEl0: + { + exception = ams::svc::DebugException_InstructionAbort; + param2 = far; + } + break; + case EsrEc_DataAbortEl0: + default: + { + exception = ams::svc::DebugException_DataAbort; + param2 = far; + } + break; + } + + /* Process the debug event. */ + Result result = KDebug::OnDebugEvent(ams::svc::DebugEvent_Exception, exception, param2, param3); + + /* If the SVC is handled, handle it. */ + if (!svc::ResultNotHandled::Includes(result)) { + /* If we should stop processing the exception, restore. */ + if (svc::ResultStopProcessingException::Includes(result)) { + svc::RestoreContext(reinterpret_cast(e_ctx)); + } + + /* If we successfully enter jit debug, restore. */ + if (cur_process.EnterJitDebug(ams::svc::DebugEvent_Exception, exception, param2, param3)) { + svc::RestoreContext(reinterpret_cast(e_ctx)); + } + } + + /* Otherwise, if result debug was returned, restore. */ + if (svc::ResultDebug::Includes(result)) { + svc::RestoreContext(reinterpret_cast(e_ctx)); + } + } + } + + /* Print that an exception occurred. */ + MESOSPHERE_RELEASE_LOG("Exception occurred. %016lx\n", GetCurrentProcess().GetProgramId()); + + /* Exit the current process. */ + GetCurrentProcess().Exit(); + } + /* NOTE: This function is called from ASM. */ void HandleException(KExceptionContext *context) { MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled()); @@ -98,14 +470,14 @@ namespace ams::kern::arch::arm64 { /* Collect far and data based on the ec. */ switch ((esr >> 26) & 0x3F) { - case 0x0: /* Unknown */ - case 0xE: /* Illegal Execution State */ - case 0x38: /* BKPT instruction */ - case 0x3C: /* BRK instruction */ + case EsrEc_Unknown: + case EsrEc_IllegalExecution: + case EsrEc_BkptInstruction: + case EsrEc_BrkInstruction: far = context->pc; data = GetInstructionData(context, esr); break; - case 0x11: /* SVC instruction from Aarch32 */ + case EsrEc_Svc32: if (context->psr & 0x20) { /* Thumb mode. */ context->pc -= 2; @@ -115,11 +487,11 @@ namespace ams::kern::arch::arm64 { } far = context->pc; break; - case 0x15: /* SVC instruction from Aarch64 */ + case EsrEc_Svc64: context->pc -= 4; far = context->pc; break; - case 0x30: /* Breakpoint from lower EL */ + case EsrEc_BreakPointEl0: far = context->pc; break; default: @@ -129,18 +501,20 @@ namespace ams::kern::arch::arm64 { /* Note that we're in an exception handler. */ GetCurrentThread().SetInExceptionHandler(); + + /* Verify that spsr's M is allowable (EL0t). */ { const bool is_user_mode = (context->psr & 0xF) == 0; if (is_user_mode) { - /* Handle any changes needed to the user preemption state. 
*/ - if (GetCurrentThread().GetUserPreemptionState() != 0 && GetCurrentProcess().GetPreemptionStatePinnedThread(GetCurrentCoreId()) == nullptr) { + /* If the user disable count is set, we may need to pin the current thread. */ + if (GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) { KScopedSchedulerLock lk; - /* Note the preemption state in process. */ - GetCurrentProcess().SetPreemptionState(); + /* Pin the current thread. */ + KScheduler::PinCurrentThread(GetCurrentProcessPointer()); - /* Set the kernel preemption state flag. */ - GetCurrentThread().SetKernelPreemptionState(1); + /* Set the interrupt flag for the thread. */ + GetCurrentThread().SetInterruptFlag(); } /* Enable interrupts while we process the usermode exception. */ @@ -150,14 +524,18 @@ namespace ams::kern::arch::arm64 { HandleUserException(context, esr, far, afsr0, afsr1, data); } } else { - MESOSPHERE_LOG("Unhandled Exception in Supervisor Mode\n"); - MESOSPHERE_LOG("Current Process = %s\n", GetCurrentProcess().GetName()); + const s32 core_id = GetCurrentCoreId(); + + MESOSPHERE_LOG("%d: Unhandled Exception in Supervisor Mode\n", core_id); + if (GetCurrentProcessPointer() != nullptr) { + MESOSPHERE_LOG("%d: Current Process = %s\n", core_id, GetCurrentProcess().GetName()); + } for (size_t i = 0; i < 31; i++) { - MESOSPHERE_LOG("X[%02zu] = %016lx\n", i, context->x[i]); + MESOSPHERE_LOG("%d: X[%02zu] = %016lx\n", core_id, i, context->x[i]); } - MESOSPHERE_LOG("PC = %016lx\n", context->pc); - MESOSPHERE_LOG("SP = %016lx\n", context->sp); + MESOSPHERE_LOG("%d: PC = %016lx\n", core_id, context->pc); + MESOSPHERE_LOG("%d: SP = %016lx\n", core_id, context->sp); MESOSPHERE_PANIC("Unhandled Exception in Supervisor Mode\n"); } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp new file mode 100644 index 000000000..b6286a02a --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp @@ -0,0 +1,387 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arch::arm64 { + + namespace { + + constexpr inline u64 ForbiddenBreakPointFlagsMask = (((1ul << 40) - 1) << 24) | /* Reserved upper bits. */ + (((1ul << 1) - 1) << 23) | /* Match VMID BreakPoint Type. */ + (((1ul << 2) - 1) << 14) | /* Security State Control. */ + (((1ul << 1) - 1) << 13) | /* Hyp Mode Control. */ + (((1ul << 4) - 1) << 9) | /* Reserved middle bits. */ + (((1ul << 2) - 1) << 3) | /* Reserved lower bits. */ + (((1ul << 2) - 1) << 1); /* Privileged Mode Control. */ + + static_assert(ForbiddenBreakPointFlagsMask == 0xFFFFFFFFFF80FE1Eul); + + constexpr inline u64 ForbiddenWatchPointFlagsMask = (((1ul << 32) - 1) << 32) | /* Reserved upper bits. */ + (((1ul << 4) - 1) << 20) | /* WatchPoint Type. */ + (((1ul << 2) - 1) << 14) | /* Security State Control. */ + (((1ul << 1) - 1) << 13) | /* Hyp Mode Control. 
*/ + (((1ul << 2) - 1) << 1); /* Privileged Access Control. */ + + static_assert(ForbiddenWatchPointFlagsMask == 0xFFFFFFFF00F0E006ul); + + constexpr inline u32 El0PsrMask = 0xFF0FFE20; + + } + + uintptr_t KDebug::GetProgramCounter(const KThread &thread) { + return GetExceptionContext(std::addressof(thread))->pc; + } + + void KDebug::SetPreviousProgramCounter() { + /* Get the current thread. */ + KThread *thread = GetCurrentThreadPointer(); + MESOSPHERE_ASSERT(thread->IsCallingSvc()); + + /* Get the exception context. */ + KExceptionContext *e_ctx = GetExceptionContext(thread); + + /* Set the previous pc. */ + if (e_ctx->write == 0) { + /* Subtract from the program counter. */ + if (thread->GetOwnerProcess()->Is64Bit()) { + e_ctx->pc -= sizeof(u32); + } else { + e_ctx->pc -= (e_ctx->psr & 0x20) ? sizeof(u16) : sizeof(u32); + } + + /* Mark that we've set. */ + e_ctx->write = 1; + } + } + + Result KDebug::GetThreadContextImpl(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer()); + + /* Get the exception context. */ + const KExceptionContext *e_ctx = GetExceptionContext(thread); + + /* If general registers are requested, get them. */ + if ((context_flags & ams::svc::ThreadContextFlag_General) != 0) { + if (!thread->IsCallingSvc() || thread->GetSvcId() == svc::SvcId_ReturnFromException) { + if (this->Is64Bit()) { + /* Get X0-X28. */ + for (auto i = 0; i <= 28; ++i) { + out->r[i] = e_ctx->x[i]; + } + } else { + /* Get R0-R12. */ + for (auto i = 0; i <= 12; ++i) { + out->r[i] = static_cast(e_ctx->x[i]); + } + } + } + } + + /* If control flags are requested, get them. */ + if ((context_flags & ams::svc::ThreadContextFlag_Control) != 0) { + if (this->Is64Bit()) { + out->fp = e_ctx->x[29]; + out->lr = e_ctx->x[30]; + out->sp = e_ctx->sp; + out->pc = e_ctx->pc; + out->pstate = (e_ctx->psr & El0PsrMask); + + /* Adjust PC if we should. */ + if (e_ctx->write == 0 && thread->IsCallingSvc()) { + out->pc -= sizeof(u32); + } + + out->tpidr = e_ctx->tpidr; + } else { + out->r[11] = static_cast(e_ctx->x[11]); + out->r[13] = static_cast(e_ctx->x[13]); + out->r[14] = static_cast(e_ctx->x[14]); + out->lr = 0; + out->sp = 0; + out->pc = e_ctx->pc; + out->pstate = (e_ctx->psr & El0PsrMask); + + /* Adjust PC if we should. */ + if (e_ctx->write == 0 && thread->IsCallingSvc()) { + out->pc -= (e_ctx->psr & 0x20) ? sizeof(u16) : sizeof(u32); + } + + out->tpidr = static_cast(e_ctx->tpidr); + } + } + + /* Get the FPU context. */ + return this->GetFpuContext(out, thread, context_flags); + } + + Result KDebug::SetThreadContextImpl(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer()); + + /* Get the exception context. */ + KExceptionContext *e_ctx = GetExceptionContext(thread); + + /* If general registers are requested, set them. */ + if ((context_flags & ams::svc::ThreadContextFlag_General) != 0) { + if (this->Is64Bit()) { + /* Set X0-X28. */ + for (auto i = 0; i <= 28; ++i) { + e_ctx->x[i] = ctx.r[i]; + } + } else { + /* Set R0-R12. */ + for (auto i = 0; i <= 12; ++i) { + e_ctx->x[i] = static_cast(ctx.r[i]); + } + } + } + + /* If control flags are requested, set them. */ + if ((context_flags & ams::svc::ThreadContextFlag_Control) != 0) { + /* Mark ourselve as having adjusted pc. 
*/ + e_ctx->write = 1; + + if (this->Is64Bit()) { + e_ctx->x[29] = ctx.fp; + e_ctx->x[30] = ctx.lr; + e_ctx->sp = ctx.sp; + e_ctx->pc = ctx.pc; + e_ctx->psr = ((ctx.pstate & El0PsrMask) | (e_ctx->psr & ~El0PsrMask)); + e_ctx->tpidr = ctx.tpidr; + } else { + e_ctx->x[13] = static_cast(ctx.r[13]); + e_ctx->x[14] = static_cast(ctx.r[14]); + e_ctx->x[30] = 0; + e_ctx->sp = 0; + e_ctx->pc = static_cast(ctx.pc); + e_ctx->psr = ((ctx.pstate & El0PsrMask) | (e_ctx->psr & ~El0PsrMask)); + e_ctx->tpidr = ctx.tpidr; + } + } + + /* Set the FPU context. */ + return this->SetFpuContext(ctx, thread, context_flags); + } + + Result KDebug::GetFpuContext(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer()); + + /* Succeed if there's nothing to do. */ + R_SUCCEED_IF((context_flags & (ams::svc::ThreadContextFlag_Fpu | ams::svc::ThreadContextFlag_FpuControl)) == 0); + + /* Get the thread context. */ + KThreadContext *t_ctx = std::addressof(thread->GetContext()); + + /* Get the FPU control registers, if required. */ + if ((context_flags & ams::svc::ThreadContextFlag_FpuControl) != 0) { + out->fpsr = t_ctx->GetFpsr(); + out->fpcr = t_ctx->GetFpcr(); + } + + /* Get the FPU registers, if required. */ + if ((context_flags & ams::svc::ThreadContextFlag_Fpu) != 0) { + static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters); + const u128 *f = t_ctx->GetFpuRegisters(); + + if (this->Is64Bit()) { + for (size_t i = 0; i < KThreadContext::NumFpuRegisters; ++i) { + out->v[i] = f[i]; + } + } else { + for (size_t i = 0; i < KThreadContext::NumFpuRegisters / 2; ++i) { + out->v[i] = f[i]; + } + for (size_t i = KThreadContext::NumFpuRegisters / 2; i < KThreadContext::NumFpuRegisters; ++i) { + out->v[i] = 0; + } + } + } + + return ResultSuccess(); + } + + Result KDebug::SetFpuContext(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer()); + + /* Succeed if there's nothing to do. */ + R_SUCCEED_IF((context_flags & (ams::svc::ThreadContextFlag_Fpu | ams::svc::ThreadContextFlag_FpuControl)) == 0); + + /* Get the thread context. */ + KThreadContext *t_ctx = std::addressof(thread->GetContext()); + + /* Set the FPU control registers, if required. */ + if ((context_flags & ams::svc::ThreadContextFlag_FpuControl) != 0) { + t_ctx->SetFpsr(ctx.fpsr); + t_ctx->SetFpcr(ctx.fpcr); + } + + /* Set the FPU registers, if required. 
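GetFpuContext() above copies all 32 vector registers for a 64-bit thread but only the lower half for a 32-bit thread, zero-filling the rest of the reported context. A rough sketch of that copy, assuming a u128 alias for the kernel's 128-bit register type (here the GCC/Clang __int128 extension):

```cpp
#include <array>
#include <cstddef>
#include <cstdio>

using u128 = unsigned __int128;              /* Assumed stand-in for the kernel's u128. */
constexpr size_t NumFpuRegisters = 32;

void CopyFpuRegisters(std::array<u128, NumFpuRegisters> &out, const u128 *regs, bool is_64_bit) {
    /* AArch32 threads only have the lower half of the vector register file. */
    const size_t count = is_64_bit ? NumFpuRegisters : NumFpuRegisters / 2;
    for (size_t i = 0; i < count; ++i)                { out[i] = regs[i]; }
    for (size_t i = count; i < NumFpuRegisters; ++i)  { out[i] = 0;       }
}

int main() {
    std::array<u128, NumFpuRegisters> regs{};
    for (size_t i = 0; i < NumFpuRegisters; ++i) { regs[i] = i + 1; }

    std::array<u128, NumFpuRegisters> ctx{};
    CopyFpuRegisters(ctx, regs.data(), false /* AArch32 */);
    std::printf("v[15]=%u v[16]=%u\n", (unsigned)ctx[15], (unsigned)ctx[16]); /* 16 0 */
    return 0;
}
```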
*/ + if ((context_flags & ams::svc::ThreadContextFlag_Fpu) != 0) { + static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters); + t_ctx->SetFpuRegisters(ctx.v, this->Is64Bit()); + } + + return ResultSuccess(); + } + + Result KDebug::BreakIfAttached(ams::svc::BreakReason break_reason, uintptr_t address, size_t size) { + return KDebugBase::OnDebugEvent(ams::svc::DebugEvent_Exception, ams::svc::DebugException_UserBreak, GetProgramCounter(GetCurrentThread()), break_reason, address, size); + } + + #define MESOSPHERE_SET_HW_BREAK_POINT(ID, FLAGS, VALUE) \ + ({ \ + cpu::SetDbgBcr##ID##El1(0); \ + cpu::EnsureInstructionConsistency(); \ + cpu::SetDbgBvr##ID##El1(VALUE); \ + cpu::EnsureInstructionConsistency(); \ + cpu::SetDbgBcr##ID##El1(FLAGS); \ + cpu::EnsureInstructionConsistency(); \ + }) + + #define MESOSPHERE_SET_HW_WATCH_POINT(ID, FLAGS, VALUE) \ + ({ \ + cpu::SetDbgWcr##ID##El1(0); \ + cpu::EnsureInstructionConsistency(); \ + cpu::SetDbgWvr##ID##El1(VALUE); \ + cpu::EnsureInstructionConsistency(); \ + cpu::SetDbgWcr##ID##El1(FLAGS); \ + cpu::EnsureInstructionConsistency(); \ + }) + + Result KDebug::SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, u64 flags, u64 value) { + /* Get the debug feature register. */ + cpu::DebugFeatureRegisterAccessor dfr0; + + /* Extract interesting info from the debug feature register. */ + const auto num_bp = dfr0.GetNumBreakpoints(); + const auto num_wp = dfr0.GetNumWatchpoints(); + const auto num_ctx = dfr0.GetNumContextAwareBreakpoints(); + + if (ams::svc::HardwareBreakPointRegisterName_I0 <= name && name <= ams::svc::HardwareBreakPointRegisterName_I15) { + /* Check that the name is a valid instruction breakpoint. */ + R_UNLESS((name - ams::svc::HardwareBreakPointRegisterName_I0) <= num_bp, svc::ResultNotSupported()); + + /* We may be getting the process, so prepare a scoped reference holder. */ + KScopedAutoObject process; + + /* Configure flags/value. */ + if ((flags & 1) != 0) { + /* We're enabling the breakpoint. Check that the flags are allowable. */ + R_UNLESS((flags & ForbiddenBreakPointFlagsMask) == 0, svc::ResultInvalidCombination()); + + /* Require that the breakpoint be linked or match context id. */ + R_UNLESS((flags & ((1ul << 21) | (1ul << 20))) != 0, svc::ResultInvalidCombination()); + + /* If the breakpoint matches context id, we need to get the context id. */ + if ((flags & (1ul << 21)) != 0) { + /* Ensure that the breakpoint is context-aware. */ + R_UNLESS((name - ams::svc::HardwareBreakPointRegisterName_I0) <= (num_bp - num_ctx), svc::ResultNotSupported()); + + /* Check that the breakpoint does not have the mismatch bit. */ + R_UNLESS((flags & (1ul << 22)) == 0, svc::ResultInvalidCombination()); + + /* Get the debug object from the current handle table. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(static_cast(value)); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the process from the debug object. */ + process = debug->GetProcess(); + R_UNLESS(process.IsNotNull(), svc::ResultProcessTerminated()); + + /* Set the value to be the context id. */ + value = process->GetId() & 0xFFFFFFFF; + } + + /* Set the breakpoint as non-secure EL0-only. */ + flags |= (1ul << 14) | (2ul << 1); + } else { + /* We're disabling the breakpoint. */ + flags = 0; + value = 0; + } + + /* Set the breakpoint. 
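The instruction-breakpoint path of SetHardwareBreakPoint() above validates user-supplied DBGBCR flags before programming the register: reserved and privileged fields must be clear, the breakpoint must be linked or context-id matched, and the kernel then forces the non-secure, EL0-only bits. A hypothetical SanitizeBreakPointFlags() helper sketching just that policy (the context-id lookup through the debug handle is omitted):

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint64_t ForbiddenBreakPointFlagsMask = 0xFFFFFFFFFF80FE1Eul; /* Value from the static_assert above. */

bool SanitizeBreakPointFlags(uint64_t &flags) {
    if ((flags & 1) == 0) {                               /* Disabled: clear everything. */
        flags = 0;
        return true;
    }
    if ((flags & ForbiddenBreakPointFlagsMask) != 0) {    /* Reserved/privileged bits set. */
        return false;
    }
    if ((flags & ((1ul << 21) | (1ul << 20))) == 0) {     /* Neither linked nor context-id matched. */
        return false;
    }
    flags |= (1ul << 14) | (2ul << 1);                    /* Force non-secure state, EL0 only. */
    return true;
}

int main() {
    uint64_t linked = (1ul << 20) | 1;                    /* Linked + enabled: accepted. */
    uint64_t hyp    = (1ul << 13) | (1ul << 20) | 1;      /* Tries to set HMC: rejected. */

    const bool ok_linked = SanitizeBreakPointFlags(linked);
    const bool ok_hyp    = SanitizeBreakPointFlags(hyp);
    std::printf("linked: %d (flags=%#llx), hyp: %d\n",
                ok_linked, static_cast<unsigned long long>(linked), ok_hyp);
    return 0;
}
```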
*/ + switch (name) { + case ams::svc::HardwareBreakPointRegisterName_I0: MESOSPHERE_SET_HW_BREAK_POINT( 0, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I1: MESOSPHERE_SET_HW_BREAK_POINT( 1, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I2: MESOSPHERE_SET_HW_BREAK_POINT( 2, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I3: MESOSPHERE_SET_HW_BREAK_POINT( 3, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I4: MESOSPHERE_SET_HW_BREAK_POINT( 4, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I5: MESOSPHERE_SET_HW_BREAK_POINT( 5, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I6: MESOSPHERE_SET_HW_BREAK_POINT( 6, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I7: MESOSPHERE_SET_HW_BREAK_POINT( 7, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I8: MESOSPHERE_SET_HW_BREAK_POINT( 8, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I9: MESOSPHERE_SET_HW_BREAK_POINT( 9, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I10: MESOSPHERE_SET_HW_BREAK_POINT(10, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I11: MESOSPHERE_SET_HW_BREAK_POINT(11, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I12: MESOSPHERE_SET_HW_BREAK_POINT(12, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I13: MESOSPHERE_SET_HW_BREAK_POINT(13, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I14: MESOSPHERE_SET_HW_BREAK_POINT(14, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_I15: MESOSPHERE_SET_HW_BREAK_POINT(15, flags, value); break; + default: break; + } + } else if (ams::svc::HardwareBreakPointRegisterName_D0 <= name && name <= ams::svc::HardwareBreakPointRegisterName_D15) { + /* Check that the name is a valid data breakpoint. */ + R_UNLESS((name - ams::svc::HardwareBreakPointRegisterName_D0) <= num_wp, svc::ResultNotSupported()); + + /* Configure flags/value. */ + if ((flags & 1) != 0) { + /* We're enabling the watchpoint. Check that the flags are allowable. */ + R_UNLESS((flags & ForbiddenWatchPointFlagsMask) == 0, svc::ResultInvalidCombination()); + + /* Set the breakpoint as linked non-secure EL0-only. */ + flags |= (1ul << 20) | (1ul << 14) | (2ul << 1); + } else { + /* We're disabling the watchpoint. */ + flags = 0; + value = 0; + } + + /* Set the watchkpoint. 
*/ + switch (name) { + case ams::svc::HardwareBreakPointRegisterName_D0: MESOSPHERE_SET_HW_WATCH_POINT( 0, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D1: MESOSPHERE_SET_HW_WATCH_POINT( 1, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D2: MESOSPHERE_SET_HW_WATCH_POINT( 2, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D3: MESOSPHERE_SET_HW_WATCH_POINT( 3, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D4: MESOSPHERE_SET_HW_WATCH_POINT( 4, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D5: MESOSPHERE_SET_HW_WATCH_POINT( 5, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D6: MESOSPHERE_SET_HW_WATCH_POINT( 6, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D7: MESOSPHERE_SET_HW_WATCH_POINT( 7, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D8: MESOSPHERE_SET_HW_WATCH_POINT( 8, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D9: MESOSPHERE_SET_HW_WATCH_POINT( 9, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D10: MESOSPHERE_SET_HW_WATCH_POINT(10, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D11: MESOSPHERE_SET_HW_WATCH_POINT(11, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D12: MESOSPHERE_SET_HW_WATCH_POINT(12, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D13: MESOSPHERE_SET_HW_WATCH_POINT(13, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D14: MESOSPHERE_SET_HW_WATCH_POINT(14, flags, value); break; + case ams::svc::HardwareBreakPointRegisterName_D15: MESOSPHERE_SET_HW_WATCH_POINT(15, flags, value); break; + default: break; + } + } else { + /* Invalid name. */ + return svc::ResultInvalidEnumValue(); + } + + return ResultSuccess(); + } + + #undef MESOSPHERE_SET_HW_WATCH_POINT + #undef MESOSPHERE_SET_HW_BREAK_POINT + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.cpp index 5cce0c31f..33fef41f7 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.cpp @@ -84,4 +84,114 @@ namespace ams::kern::arch::arm64 { this->gicc = nullptr; } + void KInterruptController::SaveCoreLocal(LocalState *state) const { + /* Save isenabler. */ + for (size_t i = 0; i < util::size(state->isenabler); ++i) { + constexpr size_t Offset = 0; + state->isenabler[i] = this->gicd->isenabler[i + Offset]; + this->gicd->isenabler[i + Offset] = 0xFFFFFFFF; + } + + /* Save ipriorityr. */ + for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { + constexpr size_t Offset = 0; + state->ipriorityr[i] = this->gicd->ipriorityr.words[i + Offset]; + this->gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF; + } + + /* Save itargetsr. */ + for (size_t i = 0; i < util::size(state->itargetsr); ++i) { + constexpr size_t Offset = 0; + state->itargetsr[i] = this->gicd->itargetsr.words[i + Offset]; + } + + /* Save icfgr. */ + for (size_t i = 0; i < util::size(state->icfgr); ++i) { + constexpr size_t Offset = 0; + state->icfgr[i] = this->gicd->icfgr[i + Offset]; + } + } + + void KInterruptController::SaveGlobal(GlobalState *state) const { + /* Save isenabler. 
*/ + for (size_t i = 0; i < util::size(state->isenabler); ++i) { + constexpr size_t Offset = util::size(LocalState{}.isenabler); + state->isenabler[i] = this->gicd->isenabler[i + Offset]; + this->gicd->isenabler[i + Offset] = 0xFFFFFFFF; + } + + /* Save ipriorityr. */ + for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { + constexpr size_t Offset = util::size(LocalState{}.ipriorityr); + state->ipriorityr[i] = this->gicd->ipriorityr.words[i + Offset]; + this->gicd->ipriorityr.words[i + Offset] = 0xFFFFFFFF; + } + + /* Save itargetsr. */ + for (size_t i = 0; i < util::size(state->itargetsr); ++i) { + constexpr size_t Offset = util::size(LocalState{}.itargetsr); + state->itargetsr[i] = this->gicd->itargetsr.words[i + Offset]; + } + + /* Save icfgr. */ + for (size_t i = 0; i < util::size(state->icfgr); ++i) { + constexpr size_t Offset = util::size(LocalState{}.icfgr); + state->icfgr[i] = this->gicd->icfgr[i + Offset]; + } + } + + void KInterruptController::RestoreCoreLocal(const LocalState *state) const { + /* Restore ipriorityr. */ + for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { + constexpr size_t Offset = 0; + this->gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i]; + } + + /* Restore itargetsr. */ + for (size_t i = 0; i < util::size(state->itargetsr); ++i) { + constexpr size_t Offset = 0; + this->gicd->itargetsr.words[i + Offset] = state->itargetsr[i]; + } + + /* Restore icfgr. */ + for (size_t i = 0; i < util::size(state->icfgr); ++i) { + constexpr size_t Offset = 0; + this->gicd->icfgr[i + Offset] = state->icfgr[i]; + } + + /* Restore isenabler. */ + for (size_t i = 0; i < util::size(state->isenabler); ++i) { + constexpr size_t Offset = 0; + this->gicd->icenabler[i + Offset] = 0xFFFFFFFF; + this->gicd->isenabler[i + Offset] = state->isenabler[i]; + } + } + + void KInterruptController::RestoreGlobal(const GlobalState *state) const { + /* Restore ipriorityr. */ + for (size_t i = 0; i < util::size(state->ipriorityr); ++i) { + constexpr size_t Offset = util::size(LocalState{}.ipriorityr); + this->gicd->ipriorityr.words[i + Offset] = state->ipriorityr[i]; + } + + /* Restore itargetsr. */ + for (size_t i = 0; i < util::size(state->itargetsr); ++i) { + constexpr size_t Offset = util::size(LocalState{}.itargetsr); + this->gicd->itargetsr.words[i + Offset] = state->itargetsr[i]; + } + + /* Restore icfgr. */ + for (size_t i = 0; i < util::size(state->icfgr); ++i) { + constexpr size_t Offset = util::size(LocalState{}.icfgr); + this->gicd->icfgr[i + Offset] = state->icfgr[i]; + } + + /* Restore isenabler. */ + for (size_t i = 0; i < util::size(state->isenabler); ++i) { + constexpr size_t Offset = util::size(LocalState{}.isenabler); + this->gicd->icenabler[i + Offset] = 0xFFFFFFFF; + this->gicd->isenabler[i + Offset] = state->isenabler[i]; + } + } + } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp index 78711d0e1..f1995e103 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp @@ -31,6 +31,81 @@ namespace ams::kern::arch::arm64 { this->interrupt_controller.Finalize(core_id); } + void KInterruptManager::Save(s32 core_id) { + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* If on core 0, save the global interrupts. 
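RestoreCoreLocal()/RestoreGlobal() above restore the enable state last, and do so by writing all-ones to ICENABLER before writing the saved mask to ISENABLER: both registers are write-one-to-set / write-one-to-clear, so writing the saved value alone could never clear lines that became enabled in the meantime. A toy model of that behaviour (GicDistributorModel is a stand-in for the MMIO block):

```cpp
#include <cstdint>
#include <cstdio>

/* Toy model of GICD_ISENABLERn (write 1 to set) and GICD_ICENABLERn (write 1 to clear). */
struct GicDistributorModel {
    uint32_t enabled = 0;
    void WriteIsEnabler(uint32_t v) { enabled |= v; }
    void WriteIcEnabler(uint32_t v) { enabled &= ~v; }
    uint32_t ReadIsEnabler() const { return enabled; }
};

int main() {
    GicDistributorModel gicd;

    const uint32_t saved = 0x000000F3;   /* Enable state captured before sleep. */
    gicd.WriteIsEnabler(0x00000F00);     /* Other lines got enabled in the meantime. */

    /* Writing only the saved value would leave 0xF00 enabled as well... */
    /* ...so clear everything first, then set exactly the saved lines. */
    gicd.WriteIcEnabler(0xFFFFFFFF);
    gicd.WriteIsEnabler(saved);

    std::printf("restored enables = %#x\n", gicd.ReadIsEnabler()); /* 0xf3 */
    return 0;
}
```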
*/ + if (core_id == 0) { + MESOSPHERE_ABORT_UNLESS(!s_global_state_saved); + this->interrupt_controller.SaveGlobal(std::addressof(s_global_state)); + s_global_state_saved = true; + } + + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Save all local interrupts. */ + MESOSPHERE_ABORT_UNLESS(!this->local_state_saved); + this->interrupt_controller.SaveCoreLocal(std::addressof(this->local_state)); + this->local_state_saved = true; + + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Finalize all cores other than core 0. */ + if (core_id != 0) { + this->Finalize(core_id); + } + + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Finalize core 0. */ + if (core_id == 0) { + this->Finalize(core_id); + } + } + + void KInterruptManager::Restore(s32 core_id) { + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Initialize core 0. */ + if (core_id == 0) { + this->Initialize(core_id); + } + + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Initialize all cores other than core 0. */ + if (core_id != 0) { + this->Initialize(core_id); + } + + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Restore all local interrupts. */ + MESOSPHERE_ASSERT(this->local_state_saved); + this->interrupt_controller.RestoreCoreLocal(std::addressof(this->local_state)); + this->local_state_saved = false; + + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* If on core 0, restore the global interrupts. */ + if (core_id == 0) { + MESOSPHERE_ASSERT(s_global_state_saved); + this->interrupt_controller.RestoreGlobal(std::addressof(s_global_state)); + s_global_state_saved = false; + } + + /* Ensure all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + } + bool KInterruptManager::OnHandleInterrupt() { /* Get the interrupt id. */ const u32 raw_irq = this->interrupt_controller.GetIrq(); @@ -100,15 +175,15 @@ namespace ams::kern::arch::arm64 { /* If we need scheduling, */ if (needs_scheduling) { - /* Handle any changes needed to the user preemption state. */ - if (user_mode && GetCurrentThread().GetUserPreemptionState() != 0 && GetCurrentProcess().GetPreemptionStatePinnedThread(GetCurrentCoreId()) == nullptr) { + /* If the user disable count is set, we may need to pin the current thread. */ + if (user_mode && GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) { KScopedSchedulerLock sl; - /* Note the preemption state in process. */ - GetCurrentProcess().SetPreemptionState(); + /* Pin the current thread. */ + KScheduler::PinCurrentThread(GetCurrentProcessPointer()); - /* Set the kernel preemption state flag. */ - GetCurrentThread().SetKernelPreemptionState(1);; + /* Set the interrupt flag for the thread. */ + GetCurrentThread().SetInterruptFlag(); /* Request interrupt scheduling. 
*/ Kernel::GetScheduler().RequestScheduleOnInterrupt(); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp index 2922328dc..18e89bfad 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp @@ -172,7 +172,7 @@ namespace ams::kern::arch::arm64 { const KVirtualAddress page = this->manager->Allocate(); MESOSPHERE_ASSERT(page != Null); cpu::ClearPageToZero(GetVoidPointer(page)); - this->ttbr = GetInteger(KPageTableBase::GetLinearPhysicalAddress(page)) | asid_tag; + this->ttbr = GetInteger(KPageTableBase::GetLinearMappedPhysicalAddress(page)) | asid_tag; /* Initialize the base page table. */ MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end)); @@ -212,7 +212,103 @@ namespace ams::kern::arch::arm64 { } Result KPageTable::Finalize() { - MESOSPHERE_UNIMPLEMENTED(); + /* Only process tables should be finalized. */ + MESOSPHERE_ASSERT(!this->IsKernel()); + + /* Note that we've updated (to ensure we're synchronized). */ + this->NoteUpdated(); + + /* Free all pages in the table. */ + { + /* Get implementation objects. */ + auto &impl = this->GetImpl(); + auto &mm = Kernel::GetMemoryManager(); + + /* Traverse, freeing all pages. */ + { + /* Get the address space size. */ + const size_t as_size = this->GetAddressSpaceSize(); + + /* Begin the traversal. */ + TraversalContext context; + TraversalEntry cur_entry = {}; + bool cur_valid = false; + TraversalEntry next_entry; + bool next_valid; + size_t tot_size = 0; + + next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), this->GetAddressSpaceStart()); + + /* Iterate over entries. */ + while (true) { + if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { + cur_entry.block_size += next_entry.block_size; + } else { + if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) { + mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize); + } + + /* Update tracking variables. */ + tot_size += cur_entry.block_size; + cur_entry = next_entry; + cur_valid = next_valid; + } + + if (cur_entry.block_size + tot_size >= as_size) { + break; + } + + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + } + + /* Handle the last block. */ + if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) { + mm.Close(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize); + } + } + + /* Cache address space extents for convenience. */ + const KProcessAddress as_start = this->GetAddressSpaceStart(); + const KProcessAddress as_last = as_start + this->GetAddressSpaceSize() - 1; + + /* Free all L3 tables. */ + for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L2BlockSize) { + L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address); + if (l1_entry->IsTable()) { + L2PageTableEntry *l2_entry = impl.GetL2Entry(l1_entry, cur_address); + if (l2_entry->IsTable()) { + KVirtualAddress l3_table = GetPageTableVirtualAddress(l2_entry->GetTable()); + if (this->GetPageTableManager().IsInPageTableHeap(l3_table)) { + while (!this->GetPageTableManager().Close(l3_table, 1)) { /* ... */ } + this->GetPageTableManager().Free(l3_table); + } + } + } + } + + /* Free all L2 tables. 
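The Finalize traversal above does not close pages one block at a time; physically contiguous entries are merged first and closed as a single range. A small worked example of what that saves (numbers chosen for illustration, not taken from real output): two adjacent 2 MiB blocks at physical 0x80000000 and 0x80200000 become one close of 0x400000 / PageSize = 1024 pages (4 KiB each) instead of two separate 512-page closes. The loop stops once the accumulated size reaches the address space size, with one final close for the trailing block, and only then are the L3, L2, and L1 tables themselves drained and freed.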
*/ + for (KProcessAddress cur_address = as_start; cur_address <= as_last; cur_address += L1BlockSize) { + L1PageTableEntry *l1_entry = impl.GetL1Entry(cur_address); + if (l1_entry->IsTable()) { + KVirtualAddress l2_table = GetPageTableVirtualAddress(l1_entry->GetTable()); + if (this->GetPageTableManager().IsInPageTableHeap(l2_table)) { + while (!this->GetPageTableManager().Close(l2_table, 1)) { /* ... */ } + this->GetPageTableManager().Free(l2_table); + } + } + } + + /* Free the L1 table. */ + this->GetPageTableManager().Free(reinterpret_cast(impl.Finalize())); + + /* Perform inherited finalization. */ + KPageTableBase::Finalize(); + } + + /* Release our asid. */ + g_asid_manager.Release(this->asid); + + return ResultSuccess(); } Result KPageTable::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) { @@ -262,7 +358,82 @@ namespace ams::kern::arch::arm64 { } } - Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + Result KPageTable::MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L1BlockSize)); + MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L1BlockSize)); + + auto &impl = this->GetImpl(); + + /* Iterate, mapping each block. */ + for (size_t i = 0; i < num_pages; i += L1BlockSize / PageSize) { + /* Map the block. */ + *impl.GetL1Entry(virt_addr) = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false); + virt_addr += L1BlockSize; + phys_addr += L1BlockSize; + } + + return ResultSuccess(); + } + + Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), L2BlockSize)); + MESOSPHERE_ASSERT(util::IsAligned(num_pages * PageSize, L2BlockSize)); + + auto &impl = this->GetImpl(); + KVirtualAddress l2_virt = Null; + int l2_open_count = 0; + + /* Iterate, mapping each block. */ + for (size_t i = 0; i < num_pages; i += L2BlockSize / PageSize) { + KPhysicalAddress l2_phys = Null; + + /* If we have no L2 table, we should get or allocate one. */ + if (l2_virt == Null) { + if (L1PageTableEntry *l1_entry = impl.GetL1Entry(virt_addr); !l1_entry->GetTable(l2_phys)) { + /* Allocate table. */ + l2_virt = AllocatePageTable(page_list, reuse_ll); + R_UNLESS(l2_virt != Null, svc::ResultOutOfResource()); + + /* Set the entry. */ + l2_phys = GetPageTablePhysicalAddress(l2_virt); + PteDataSynchronizationBarrier(); + *l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, l2_phys, this->IsKernel(), true); + PteDataSynchronizationBarrier(); + } else { + l2_virt = GetPageTableVirtualAddress(l2_phys); + } + } + MESOSPHERE_ASSERT(l2_virt != Null); + + /* Map the block. 
*/ + *impl.GetL2EntryFromTable(l2_virt, virt_addr) = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), false); + l2_open_count++; + virt_addr += L2BlockSize; + phys_addr += L2BlockSize; + + /* Account for hitting end of table. */ + if (util::IsAligned(GetInteger(virt_addr), L1BlockSize)) { + if (this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { + this->GetPageTableManager().Open(l2_virt, l2_open_count); + } + l2_virt = Null; + l2_open_count = 0; + } + } + + /* Perform any remaining opens. */ + if (l2_open_count > 0 && this->GetPageTableManager().IsInPageTableHeap(l2_virt)) { + this->GetPageTableManager().Open(l2_virt, l2_open_count); + } + + return ResultSuccess(); + } + + Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, PageLinkedList *page_list, bool reuse_ll) { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); @@ -401,7 +572,8 @@ namespace ams::kern::arch::arm64 { MESOSPHERE_ABORT_UNLESS(force); const size_t cur_size = std::min(next_entry.block_size - (GetInteger(virt_addr) & (next_entry.block_size - 1)), remaining_pages * PageSize); remaining_pages -= cur_size / PageSize; - virt_addr += cur_size; + virt_addr += cur_size; + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); continue; } @@ -548,7 +720,7 @@ namespace ams::kern::arch::arm64 { size_t alignment; for (alignment = ContiguousPageSize; (virt_addr & (alignment - 1)) == (phys_addr & (alignment - 1)); alignment = GetLargerAlignment(alignment)) { /* Check if this would be our last map. */ - const size_t pages_to_map = (alignment - (virt_addr & (alignment - 1))) & (alignment - 1); + const size_t pages_to_map = ((alignment - (virt_addr & (alignment - 1))) & (alignment - 1)) / PageSize; if (pages_to_map + (alignment / PageSize) > remaining_pages) { break; } @@ -619,7 +791,7 @@ namespace ams::kern::arch::arm64 { if (num_pages < ContiguousPageSize / PageSize) { for (const auto &block : pg) { - const KPhysicalAddress block_phys_addr = GetLinearPhysicalAddress(block.GetAddress()); + const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress()); const size_t cur_pages = block.GetNumPages(); R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, L3BlockSize, page_list, reuse_ll)); @@ -631,7 +803,7 @@ namespace ams::kern::arch::arm64 { AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize); for (const auto &block : pg) { /* Create a block representing this physical group, synchronize its alignment to our virtual block. */ - const KPhysicalAddress block_phys_addr = GetLinearPhysicalAddress(block.GetAddress()); + const KPhysicalAddress block_phys_addr = GetLinearMappedPhysicalAddress(block.GetAddress()); size_t cur_pages = block.GetNumPages(); AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment()); @@ -858,7 +1030,7 @@ namespace ams::kern::arch::arm64 { } /* Open references to the L2 table. */ - Kernel::GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize); + this->GetPageTableManager().Open(l2_table, L1BlockSize / L2BlockSize); /* Replace the L1 entry with one to the new table. */ PteDataSynchronizationBarrier(); @@ -905,7 +1077,7 @@ namespace ams::kern::arch::arm64 { } /* Open references to the L3 table. 
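One of the smaller hunks above deserves a worked example: the old pages_to_map expression computed the distance to the next alignment boundary in bytes but compared it against remaining_pages, a page count. With alignment = 0x200000 (2 MiB) and a virtual address 0x10000 bytes past a 2 MiB boundary, the byte distance to the boundary is 0x200000 - 0x10000 = 0x1F0000, while the number of pages actually needed to reach it is 0x1F0000 / 0x1000 = 496. Dividing by PageSize, as the new expression does, keeps the comparison against remaining_pages in consistent units, so the alignment-growing loop no longer breaks out prematurely on a value roughly 4096x too large.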
*/ - Kernel::GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize); + this->GetPageTableManager().Open(l3_table, L2BlockSize / L3BlockSize); /* Replace the L2 entry with one to the new table. */ PteDataSynchronizationBarrier(); diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp index e232942ee..1ab6f0e91 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -292,4 +292,134 @@ namespace ams::kern::arch::arm64 { return false; } + void KPageTableImpl::Dump(uintptr_t start, size_t size) const { + /* If zero size, there's nothing to dump. */ + if (size == 0) { + return; + } + + /* Define extents. */ + const uintptr_t end = start + size; + const uintptr_t last = end - 1; + + MESOSPHERE_LOG("==== PAGE TABLE DUMP START (%012lx - %012lx) ====\n", start, last); + ON_SCOPE_EXIT { MESOSPHERE_LOG("==== PAGE TABLE DUMP END ====\n"); }; + + /* Define tracking variables. */ + bool unmapped = false; + uintptr_t unmapped_start = 0; + + /* Walk the table. */ + uintptr_t cur = start; + while (cur < end) { + /* Validate that we can read the actual entry. */ + const size_t l0_index = GetL0Index(cur); + const size_t l1_index = GetL1Index(cur); + if (this->is_kernel) { + /* Kernel entries must be accessed via TTBR1. */ + if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - this->num_entries)) { + return; + } + } else { + /* User entries must be accessed with TTBR0. */ + if ((l0_index != 0) || l1_index >= this->num_entries) { + return; + } + } + + /* Try to get from l1 table. */ + const L1PageTableEntry *l1_entry = this->GetL1Entry(cur); + if (l1_entry->IsBlock()) { + /* Update. */ + cur = util::AlignDown(cur, L1BlockSize); + if (unmapped) { + unmapped = false; + MESOSPHERE_LOG("%012lx - %012lx: ---\n", unmapped_start, cur - 1); + } + + /* Print. */ + MESOSPHERE_LOG("%012lx: %016lx\n", cur, *reinterpret_cast(l1_entry)); + + /* Advance. */ + cur += L1BlockSize; + continue; + } else if (!l1_entry->IsTable()) { + /* Update. */ + cur = util::AlignDown(cur, L1BlockSize); + if (!unmapped) { + unmapped_start = cur; + unmapped = true; + } + + /* Advance. */ + cur += L1BlockSize; + continue; + } + + /* Try to get from l2 table. */ + const L2PageTableEntry *l2_entry = this->GetL2Entry(l1_entry, cur); + if (l2_entry->IsBlock()) { + /* Update. */ + cur = util::AlignDown(cur, L2BlockSize); + if (unmapped) { + unmapped = false; + MESOSPHERE_LOG("%012lx - %012lx: ---\n", unmapped_start, cur - 1); + } + + /* Print. */ + MESOSPHERE_LOG("%012lx: %016lx\n", cur, *reinterpret_cast(l2_entry)); + + /* Advance. */ + cur += L2BlockSize; + continue; + } else if (!l2_entry->IsTable()) { + /* Update. */ + cur = util::AlignDown(cur, L2BlockSize); + if (!unmapped) { + unmapped_start = cur; + unmapped = true; + } + + /* Advance. */ + cur += L2BlockSize; + continue; + } + + /* Try to get from l3 table. */ + const L3PageTableEntry *l3_entry = this->GetL3Entry(l2_entry, cur); + if (l3_entry->IsBlock()) { + /* Update. */ + cur = util::AlignDown(cur, L3BlockSize); + if (unmapped) { + unmapped = false; + MESOSPHERE_LOG("%012lx - %012lx: ---\n", unmapped_start, cur - 1); + } + + /* Print. */ + MESOSPHERE_LOG("%012lx: %016lx\n", cur, *reinterpret_cast(l3_entry)); + + /* Advance. */ + cur += L3BlockSize; + continue; + } else { + /* Update. 
*/ + cur = util::AlignDown(cur, L3BlockSize); + if (!unmapped) { + unmapped_start = cur; + unmapped = true; + } + + /* Advance. */ + cur += L3BlockSize; + continue; + } + } + + /* Print the last unmapped range if necessary. */ + if (unmapped) { + MESOSPHERE_LOG("%012lx - %012lx: ---\n", unmapped_start, last); + } + } + + } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp index 846766e5d..17d808e64 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp @@ -19,7 +19,7 @@ namespace ams::kern::arch::arm64 { void KSupervisorPageTable::Initialize(s32 core_id) { /* Get the identity mapping ttbr0. */ - this->ttbr0[core_id] = cpu::GetTtbr0El1(); + this->ttbr0_identity[core_id] = cpu::GetTtbr0El1(); /* Set sctlr_el1 */ cpu::SystemControlRegisterAccessor().SetWxn(true).Store(); @@ -34,12 +34,9 @@ namespace ams::kern::arch::arm64 { const u64 ttbr1 = cpu::GetTtbr1El1() & 0xFFFFFFFFFFFFul; const u64 kernel_vaddr_start = 0xFFFFFF8000000000ul; const u64 kernel_vaddr_end = 0xFFFFFFFFFFE00000ul; - void *table = GetVoidPointer(KPageTableBase::GetLinearVirtualAddress(ttbr1)); + void *table = GetVoidPointer(KPageTableBase::GetLinearMappedVirtualAddress(ttbr1)); this->page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end); } } - void KSupervisorPageTable::Finalize(s32 core_id) { - MESOSPHERE_UNIMPLEMENTED(); - } } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp index 67a18aee3..90ad97c8a 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp @@ -26,7 +26,7 @@ namespace ams::kern::arch::arm64 { /* Send KDebug event for this thread's creation. */ { KScopedInterruptEnable ei; - /* TODO */ + KDebug::OnDebugEvent(ams::svc::DebugEvent_CreateThread, GetCurrentThread().GetId(), GetInteger(GetCurrentThread().GetThreadLocalRegionAddress()), GetCurrentThread().GetEntrypoint()); } /* Handle any pending dpc. */ @@ -40,6 +40,8 @@ namespace ams::kern::arch::arm64 { namespace { + constexpr inline u32 El0PsrMask = 0xFF0FFE20; + ALWAYS_INLINE bool IsFpuEnabled() { return cpu::ArchitecturalFeatureAccessControlRegisterAccessor().IsFpEnabled(); } @@ -61,16 +63,17 @@ namespace ams::kern::arch::arm64 { std::memset(ctx, 0, sizeof(*ctx)); /* Set PC and argument. */ - ctx->pc = GetInteger(pc); + ctx->pc = GetInteger(pc) & ~(UINT64_C(1)); ctx->x[0] = arg; /* Set PSR. */ if (is_64_bit) { ctx->psr = 0; } else { - constexpr u64 PsrArmValue = 0x20; - constexpr u64 PsrThumbValue = 0x00; + constexpr u64 PsrArmValue = 0x00; + constexpr u64 PsrThumbValue = 0x20; ctx->psr = ((pc & 1) == 0 ? PsrArmValue : PsrThumbValue) | (0x10); + MESOSPHERE_LOG("Creating User 32-Thread, %016lx\n", GetInteger(pc)); } /* Set stack pointer. 
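The swapped PsrArmValue/PsrThumbValue constants above line up with the AArch32 CPSR encoding: bit 5 (0x20) is the T (Thumb) bit, and bit 0 of the requested entry point selects the instruction set, which is also why the saved pc is masked with ~1. A quick worked example under those rules: an entry point of 0x00081001 yields ctx->pc = 0x00081000 and ctx->psr = 0x20 | 0x10 = 0x30 (Thumb, AArch32 user mode), while 0x00081000 yields ctx->psr = 0x00 | 0x10 = 0x10 (ARM). The previous constants inverted that, starting Thumb entry points in ARM state and vice versa.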
*/ @@ -173,4 +176,112 @@ namespace ams::kern::arch::arm64 { } } + void KThreadContext::CloneFpuStatus() { + u64 pcr, psr; + cpu::InstructionMemoryBarrier(); + if (IsFpuEnabled()) { + __asm__ __volatile__("mrs %[pcr], fpcr" : [pcr]"=r"(pcr) :: "memory"); + __asm__ __volatile__("mrs %[psr], fpsr" : [psr]"=r"(psr) :: "memory"); + } else { + pcr = GetCurrentThread().GetContext().GetFpcr(); + psr = GetCurrentThread().GetContext().GetFpsr(); + } + + this->SetFpcr(pcr); + this->SetFpsr(psr); + } + + void KThreadContext::SetFpuRegisters(const u128 *v, bool is_64_bit) { + if (is_64_bit) { + for (size_t i = 0; i < KThreadContext::NumFpuRegisters; ++i) { + this->fpu_registers[i] = v[i]; + } + } else { + for (size_t i = 0; i < KThreadContext::NumFpuRegisters / 2; ++i) { + this->fpu_registers[i] = v[i]; + } + } + } + + void GetUserContext(ams::svc::ThreadContext *out, const KThread *thread) { + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + MESOSPHERE_ASSERT(thread->IsSuspended()); + MESOSPHERE_ASSERT(thread->GetOwnerProcess() != nullptr); + + /* Get the contexts. */ + const KExceptionContext *e_ctx = GetExceptionContext(thread); + const KThreadContext *t_ctx = std::addressof(thread->GetContext()); + + if (thread->GetOwnerProcess()->Is64Bit()) { + /* Set special registers. */ + out->fp = e_ctx->x[29]; + out->lr = e_ctx->x[30]; + out->sp = e_ctx->sp; + out->pc = e_ctx->pc; + out->pstate = e_ctx->psr & El0PsrMask; + + /* Get the thread's general purpose registers. */ + if (thread->IsCallingSvc()) { + for (size_t i = 19; i < 29; ++i) { + out->r[i] = e_ctx->x[i]; + } + if (e_ctx->write == 0) { + out->pc -= sizeof(u32); + } + } else { + for (size_t i = 0; i < 29; ++i) { + out->r[i] = e_ctx->x[i]; + } + } + + /* Copy tpidr. */ + out->tpidr = e_ctx->tpidr; + + /* Copy fpu registers. */ + static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters); + const u128 *f = t_ctx->GetFpuRegisters(); + for (size_t i = 0; i < KThreadContext::NumFpuRegisters; ++i) { + out->v[i] = f[i]; + } + } else { + /* Set special registers. */ + out->pc = static_cast(e_ctx->pc); + out->pstate = e_ctx->psr & 0xFF0FFE20; + + /* Get the thread's general purpose registers. */ + for (size_t i = 0; i < 15; ++i) { + out->r[i] = static_cast(e_ctx->x[i]); + } + + /* Adjust PC, if the thread is calling svc. */ + if (thread->IsCallingSvc()) { + if (e_ctx->write == 0) { + /* Adjust by 2 if thumb mode, 4 if arm mode. */ + out->pc -= ((e_ctx->psr & 0x20) == 0) ? sizeof(u32) : sizeof(u16); + } + } + + /* Copy tpidr. */ + out->tpidr = static_cast(e_ctx->tpidr); + + /* Copy fpu registers. */ + static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters); + const u128 *f = t_ctx->GetFpuRegisters(); + for (size_t i = 0; i < KThreadContext::NumFpuRegisters / 2; ++i) { + out->v[i] = f[i]; + } + for (size_t i = KThreadContext::NumFpuRegisters / 2; i < KThreadContext::NumFpuRegisters; ++i) { + out->v[i] = 0; + } + } + + /* Copy fpcr/fpsr. */ + out->fpcr = t_ctx->GetFpcr(); + out->fpsr = t_ctx->GetFpsr(); + } + + void KThreadContext::OnThreadTerminating(const KThread *thread) { + /* ... 
*/ + } + } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s index d210466c9..ebdec0c46 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s +++ b/libraries/libmesosphere/source/arch/arm64/kern_userspace_memory_access_asm.s @@ -428,6 +428,95 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess20ClearMemorySize32BitEPv: mov x0, #1 ret +/* ams::kern::arch::arm64::UserspaceAccess::UpdateLockAtomic(u32 *out, u32 *address, u32 if_zero, u32 new_orr_mask) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj +.type _ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj: + /* Load the value from the address. */ + ldaxr w4, [x1] + + /* Orr in the new mask. */ + orr w5, w4, w3 + + /* If the value is zero, use the if_zero value, otherwise use the newly orr'd value. */ + cmp w4, wzr + csel w5, w2, w5, eq + + /* Try to store. */ + stlxr w6, w5, [x1] + + /* If we failed to store, try again. */ + cbnz w6, _ZN3ams4kern4arch5arm6415UserspaceAccess16UpdateLockAtomicEPjS4_jj + + /* We're done. */ + str w4, [x0] + mov x0, #1 + ret + + +/* ams::kern::arch::arm64::UserspaceAccess::UpdateIfEqualAtomic(s32 *out, s32 *address, s32 compare_value, s32 new_value) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii +.type _ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii: + /* Load the value from the address. */ + ldaxr w4, [x1] + + /* Compare it to the desired one. */ + cmp w4, w2 + + /* If equal, we want to try to write the new value. */ + b.eq 1f + + /* Otherwise, clear our exclusive hold and finish. */ + clrex + b 2f + +1: /* Try to store. */ + stlxr w5, w3, [x1] + + /* If we failed to store, try again. */ + cbnz w5, _ZN3ams4kern4arch5arm6415UserspaceAccess19UpdateIfEqualAtomicEPiS4_ii + +2: /* We're done. */ + str w4, [x0] + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::DecrementIfLessThanAtomic(s32 *out, s32 *address, s32 compare) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i +.type _ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i: + /* Load the value from the address. */ + ldaxr w3, [x1] + + /* Compare it to the desired one. */ + cmp w3, w2 + + /* If less than, we want to try to decrement. */ + b.lt 1f + + /* Otherwise, clear our exclusive hold and finish. */ + clrex + b 2f + +1: /* Decrement and try to store. */ + sub w4, w3, #1 + stlxr w5, w4, [x1] + + /* If we failed to store, try again. */ + cbnz w5, _ZN3ams4kern4arch5arm6415UserspaceAccess25DecrementIfLessThanAtomicEPiS4_i + +2: /* We're done. 
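The three exclusive-monitor loops above (UpdateLockAtomic, UpdateIfEqualAtomic, DecrementIfLessThanAtomic) are ordinary ldaxr/stlxr retry loops over a word in userspace. In C++ terms, UpdateLockAtomic behaves roughly like the following compare-exchange loop (a sketch of the semantics only; the real routine must also survive faults on the user pointer, which is why it lives in the userspace-access region):

    // sketch: reports the previous value through out, like the assembly does
    bool UpdateLockAtomicSketch(u32 *out, u32 *address, u32 if_zero, u32 new_orr_mask) {
        u32 expected = __atomic_load_n(address, __ATOMIC_ACQUIRE);
        u32 desired;
        do {
            desired = (expected == 0) ? if_zero : (expected | new_orr_mask);
        } while (!__atomic_compare_exchange_n(address, &expected, desired, true,
                                              __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));
        *out = expected;
        return true;
    }

The other two follow the same shape, differing only in the condition under which the store is attempted (equality, or strictly-less-than plus a decrement).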
*/ + str w3, [x0] + mov x0, #1 + ret + /* ams::kern::arch::arm64::UserspaceAccess::StoreDataCache(uintptr_t start, uintptr_t end) */ .section .text._ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm, "ax", %progbits .global _ZN3ams4kern4arch5arm6415UserspaceAccess14StoreDataCacheEmm @@ -508,6 +597,276 @@ _ZN3ams4kern4arch5arm6415UserspaceAccess26InvalidateInstructionCacheEmm: mov x0, #1 ret +/* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory32Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory32BitEPvPKvm: + /* Check if we have any work to do. */ + cmp x2, #0 + b.eq 3f + + /* Save variables in temporary registers. */ + mov x4, x0 + mov x5, x1 + mov x6, x2 + add x7, x5, x6 + + /* Save our return address. */ + mov x8, x30 + +1: /* Set our return address so that on read failure we continue as though we read -1. */ + adr x30, 4f + + /* Read the word from io. */ + ldtr w9, [x5] + dsb sy + nop + +2: /* Restore our return address. */ + mov x30, x8 + + /* Write the value we read. */ + sttr w9, [x4] + + /* Advance. */ + add x4, x4, #4 + add x5, x5, #4 + cmp x5, x7 + b.ne 1b + +3: /* We're done! */ + mov x0, #1 + ret + +4: /* We failed to read a value, so continue as though we read -1. */ + mov w9, #0xFFFFFFFF + b 2b + +/* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory16Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess17ReadIoMemory16BitEPvPKvm: + /* Check if we have any work to do. */ + cmp x2, #0 + b.eq 3f + + /* Save variables in temporary registers. */ + mov x4, x0 + mov x5, x1 + mov x6, x2 + add x7, x5, x6 + + /* Save our return address. */ + mov x8, x30 + +1: /* Set our return address so that on read failure we continue as though we read -1. */ + adr x30, 4f + + /* Read the word from io. */ + ldtrh w9, [x5] + dsb sy + nop + +2: /* Restore our return address. */ + mov x30, x8 + + /* Write the value we read. */ + sttrh w9, [x4] + + /* Advance. */ + add x4, x4, #2 + add x5, x5, #2 + cmp x5, x7 + b.ne 1b + +3: /* We're done! */ + mov x0, #1 + ret + +4: /* We failed to read a value, so continue as though we read -1. */ + mov w9, #0xFFFFFFFF + b 2b + +/* ams::kern::arch::arm64::UserspaceAccess::ReadIoMemory8Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess16ReadIoMemory8BitEPvPKvm: + /* Check if we have any work to do. */ + cmp x2, #0 + b.eq 3f + + /* Save variables in temporary registers. */ + mov x4, x0 + mov x5, x1 + mov x6, x2 + add x7, x5, x6 + + /* Save our return address. */ + mov x8, x30 + +1: /* Set our return address so that on read failure we continue as though we read -1. */ + adr x30, 4f + + /* Read the word from io. 
*/ + ldtrb w9, [x5] + dsb sy + nop + +2: /* Restore our return address. */ + mov x30, x8 + + /* Write the value we read. */ + sttrb w9, [x4] + + /* Advance. */ + add x4, x4, #1 + add x5, x5, #1 + cmp x5, x7 + b.ne 1b + +3: /* We're done! */ + mov x0, #1 + ret + +4: /* We failed to read a value, so continue as though we read -1. */ + mov w9, #0xFFFFFFFF + b 2b + +/* ams::kern::arch::arm64::UserspaceAccess::WriteIoMemory32Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory32BitEPvPKvm: + /* Check if we have any work to do. */ + cmp x2, #0 + b.eq 3f + + /* Save variables in temporary registers. */ + mov x4, x0 + mov x5, x1 + mov x6, x2 + add x7, x5, x6 + + /* Save our return address. */ + mov x8, x30 + +1: /* Read the word from normal memory. */ + mov x30, x8 + ldtr w9, [x5] + + /* Set our return address so that on read failure we continue. */ + adr x30, 2f + + /* Write the word to io. */ + sttr w9, [x5] + dsb sy + +2: /* Continue. */ + nop + + /* Advance. */ + add x4, x4, #4 + add x5, x5, #4 + cmp x5, x7 + b.ne 1b + +3: /* We're done! */ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::WriteIoMemory16Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess18WriteIoMemory16BitEPvPKvm: + /* Check if we have any work to do. */ + cmp x2, #0 + b.eq 3f + + /* Save variables in temporary registers. */ + mov x4, x0 + mov x5, x1 + mov x6, x2 + add x7, x5, x6 + + /* Save our return address. */ + mov x8, x30 + +1: /* Read the word from normal memory. */ + mov x30, x8 + ldtrh w9, [x5] + + /* Set our return address so that on read failure we continue. */ + adr x30, 2f + + /* Write the word to io. */ + sttrh w9, [x5] + dsb sy + +2: /* Continue. */ + nop + + /* Advance. */ + add x4, x4, #2 + add x5, x5, #2 + cmp x5, x7 + b.ne 1b + +3: /* We're done! */ + mov x0, #1 + ret + +/* ams::kern::arch::arm64::UserspaceAccess::WriteIoMemory8Bit(void *dst, const void *src, size_t size) */ +.section .text._ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm, "ax", %progbits +.global _ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm +.type _ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm, %function +.balign 0x10 +_ZN3ams4kern4arch5arm6415UserspaceAccess17WriteIoMemory8BitEPvPKvm: + /* Check if we have any work to do. */ + cmp x2, #0 + b.eq 3f + + /* Save variables in temporary registers. */ + mov x4, x0 + mov x5, x1 + mov x6, x2 + add x7, x5, x6 + + /* Save our return address. */ + mov x8, x30 + +1: /* Read the word from normal memory. */ + mov x30, x8 + ldtrb w9, [x5] + + /* Set our return address so that on read failure we continue. */ + adr x30, 2f + + /* Write the word to io. */ + sttrb w9, [x5] + dsb sy + +2: /* Continue. */ + nop + + /* Advance. */ + add x4, x4, #1 + add x5, x5, #1 + cmp x5, x7 + b.ne 1b + +3: /* We're done! */ + mov x0, #1 + ret + /* ================ All Userspace Access Functions before this line. 
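A note on the pattern shared by all six Read/WriteIoMemory helpers above: each routine loads a recovery label into x30 before the unprivileged access, and the comments indicate that a fault on that access resumes execution at the address held in x30 (that redirection behaviour is inferred from the comments here, not restated from the fault handler itself). The net effect, expressed as a sketch in C++ (TryReadUserWord is a hypothetical stand-in for the fault-tolerant access, not a real helper in this codebase):

    // sketch of the per-word behaviour of the 32-bit read path
    u32 ReadIoWordSketch(const u32 *user_src) {
        u32 value;
        if (!TryReadUserWord(&value, user_src)) {   // hypothetical: reports failure instead of faulting
            value = 0xFFFFFFFF;                     // a faulting read behaves as if it returned -1
        }
        return value;                               // faulting writes, by contrast, are simply dropped
    }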
================ */ /* ams::kern::arch::arm64::UserspaceAccessFunctionAreaEnd() */ diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_call_secure_monitor_asm.s b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_call_secure_monitor_asm.s new file mode 100644 index 000000000..dea5890b2 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_call_secure_monitor_asm.s @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::svc::CallCallSecureMonitor64From32() */ +.section .text._ZN3ams4kern3svc29CallCallSecureMonitor64From32Ev, "ax", %progbits +.global _ZN3ams4kern3svc29CallCallSecureMonitor64From32Ev +.type _ZN3ams4kern3svc29CallCallSecureMonitor64From32Ev, %function +_ZN3ams4kern3svc29CallCallSecureMonitor64From32Ev: + /* Secure Monitor 64-from-32 ABI is not supported. */ + mov x0, xzr + mov x1, xzr + mov x2, xzr + mov x3, xzr + mov x4, xzr + mov x5, xzr + mov x6, xzr + mov x7, xzr + + ret \ No newline at end of file diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_exception_asm.s b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_exception_asm.s new file mode 100644 index 000000000..c3e2a74d5 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_exception_asm.s @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::svc::CallReturnFromException64(Result result) */ +.section .text._ZN3ams4kern3svc25CallReturnFromException64Ev, "ax", %progbits +.global _ZN3ams4kern3svc25CallReturnFromException64Ev +.type _ZN3ams4kern3svc25CallReturnFromException64Ev, %function +_ZN3ams4kern3svc25CallReturnFromException64Ev: + /* Save registers the SVC entry handler didn't. */ + stp x12, x13, [sp, #(8 * 12)] + stp x14, x15, [sp, #(8 * 14)] + stp x16, x17, [sp, #(8 * 16)] + str x19, [sp, #(8 * 19)] + stp x20, x21, [sp, #(8 * 20)] + stp x22, x23, [sp, #(8 * 22)] + stp x24, x25, [sp, #(8 * 24)] + stp x26, x26, [sp, #(8 * 26)] + stp x28, x29, [sp, #(8 * 28)] + + /* Call ams::kern::arch::arm64::ReturnFromException(result). */ + bl _ZN3ams4kern4arch5arm6419ReturnFromExceptionENS_6ResultE + +0: /* We should never reach this point. 
*/ + b 0b + +/* ams::kern::svc::CallReturnFromException64From32(Result result) */ +.section .text._ZN3ams4kern3svc31CallReturnFromException64From32Ev, "ax", %progbits +.global _ZN3ams4kern3svc31CallReturnFromException64From32Ev +.type _ZN3ams4kern3svc31CallReturnFromException64From32Ev, %function +_ZN3ams4kern3svc31CallReturnFromException64From32Ev: + /* Save registers the SVC entry handler didn't. */ + /* ... */ + + /* Call ams::kern::arch::arm64::ReturnFromException(result). */ + bl _ZN3ams4kern4arch5arm6419ReturnFromExceptionENS_6ResultE + +0: /* We should never reach this point. */ + b 0b + + +/* ams::kern::svc::RestoreContext(uintptr_t sp) */ +.section .text._ZN3ams4kern3svc14RestoreContextEm, "ax", %progbits +.global _ZN3ams4kern3svc14RestoreContextEm +.type _ZN3ams4kern3svc14RestoreContextEm, %function +_ZN3ams4kern3svc14RestoreContextEm: + /* Set the stack pointer, set daif. */ + mov sp, x0 + msr daifset, #2 + +0: /* We should handle DPC. */ + /* Check the dpc flags. */ + ldrb w8, [sp, #(0x120 + 0x10)] + cbz w8, 1f + + /* We have DPC to do! */ + /* Save registers and call ams::kern::KDpcManager::HandleDpc(). */ + sub sp, sp, #0x40 + stp x0, x1, [sp, #(8 * 0)] + stp x2, x3, [sp, #(8 * 2)] + stp x4, x5, [sp, #(8 * 4)] + stp x6, x7, [sp, #(8 * 6)] + bl _ZN3ams4kern11KDpcManager9HandleDpcEv + ldp x0, x1, [sp, #(8 * 0)] + ldp x2, x3, [sp, #(8 * 2)] + ldp x4, x5, [sp, #(8 * 4)] + ldp x6, x7, [sp, #(8 * 6)] + add sp, sp, #0x40 + b 0b + +1: /* We're done with DPC, and should return from the svc. */ + /* Clear our in-SVC note. */ + strb wzr, [sp, #(0x120 + 0x12)] + + /* Restore registers. */ + ldp x30, x8, [sp, #(8 * 30)] + ldp x9, x10, [sp, #(8 * 32)] + ldr x11, [sp, #(8 * 34)] + msr sp_el0, x8 + msr elr_el1, x9 + msr spsr_el1, x10 + msr tpidr_el0, x11 + ldp x0, x1, [sp, #(8 * 0)] + ldp x2, x3, [sp, #(8 * 2)] + ldp x4, x5, [sp, #(8 * 4)] + ldp x6, x7, [sp, #(8 * 6)] + ldp x8, x9, [sp, #(8 * 8)] + ldp x10, x11, [sp, #(8 * 10)] + ldp x12, x13, [sp, #(8 * 12)] + ldp x14, x15, [sp, #(8 * 14)] + ldp x16, x17, [sp, #(8 * 16)] + ldp x18, x19, [sp, #(8 * 18)] + ldp x20, x21, [sp, #(8 * 20)] + ldp x22, x23, [sp, #(8 * 22)] + ldp x24, x25, [sp, #(8 * 24)] + ldp x26, x27, [sp, #(8 * 26)] + ldp x28, x29, [sp, #(8 * 28)] + + /* Return. */ + add sp, sp, #0x120 + eret diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s index b3106e0b2..39d8eda7e 100644 --- a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers_asm.s @@ -31,6 +31,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev: mrs x9, elr_el1 mrs x10, spsr_el1 mrs x11, tpidr_el0 + mrs x18, tpidr_el1 /* Save callee-saved registers. */ stp x19, x20, [sp, #(8 * 19)] @@ -63,8 +64,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev: tst x10, #1 b.eq 3f - /* Check if our preemption state allows us to call SVCs. */ - mrs x10, tpidrro_el0 + /* Check if our disable count allows us to call SVCs. */ + ldr x10, [x18, #0x30] ldrh w10, [x10, #0x100] cbz w10, 1f @@ -83,7 +84,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev: strb w8, [sp, #(0x120 + 0x11)] /* Invoke the SVC handler. 
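The tpidrro_el0 to tpidr_el1 change just above swaps where the SVC entry path finds the thread-local region: instead of trusting the user-visible tpidrro_el0, it loads the TLR pointer out of the current thread's kernel object (tpidr_el1 plus a fixed field offset, 0x30 here) and then reads the halfword at TLR+0x100, the user disable count that gates whether SVCs are currently allowed. A structural sketch of the two loads (field names and the second struct's layout are illustrative, matching only the offsets the assembly uses):

    struct KThreadSketch {
        u8   padding[0x30];
        void *tls_address;        // what "ldr x10, [x18, #0x30]" picks up
    };
    struct ThreadLocalRegionSketch {
        u8  message_buffer[0x100];
        u16 disable_count;        // what "ldrh w10, [x10, #0x100]" checks
    };

The same substitution is made in the 32-bit handler further down.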
*/ - mrs x18, tpidr_el1 msr daifclr, #2 blr x11 msr daifset, #2 @@ -211,6 +211,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev: mrs x17, elr_el1 mrs x20, spsr_el1 mrs x19, tpidr_el0 + mrs x18, tpidr_el1 stp x17, x20, [sp, #(8 * 32)] str x19, [sp, #(8 * 34)] @@ -239,8 +240,8 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev: tst x17, #1 b.eq 3f - /* Check if our preemption state allows us to call SVCs. */ - mrs x15, tpidrro_el0 + /* Check if our disable count allows us to call SVCs. */ + ldr x15, [x18, #0x30] ldrh w15, [x15, #0x100] cbz w15, 1f @@ -259,7 +260,6 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev: strb w16, [sp, #(0x120 + 0x11)] /* Invoke the SVC handler. */ - mrs x18, tpidr_el1 msr daifclr, #2 blr x19 msr daifset, #2 diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_light_ipc_asm.s b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_light_ipc_asm.s new file mode 100644 index 000000000..acc64aa19 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_light_ipc_asm.s @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::svc::CallSendSyncRequestLight64() */ +.section .text._ZN3ams4kern3svc26CallSendSyncRequestLight64Ev, "ax", %progbits +.global _ZN3ams4kern3svc26CallSendSyncRequestLight64Ev +.type _ZN3ams4kern3svc26CallSendSyncRequestLight64Ev, %function +_ZN3ams4kern3svc26CallSendSyncRequestLight64Ev: + /* Allocate space for the light ipc data. */ + sub sp, sp, #(4 * 8) + + /* Store the light ipc data. */ + stp w1, w2, [sp, #(4 * 0)] + stp w3, w4, [sp, #(4 * 2)] + stp w5, w6, [sp, #(4 * 4)] + str w7, [sp, #(4 * 6)] + + /* Invoke the svc handler. */ + mov x1, sp + stp x29, x30, [sp, #-16]! + bl _ZN3ams4kern3svc22SendSyncRequestLight64EjPj + ldp x29, x30, [sp], #16 + + /* Load the light ipc data. */ + ldp w1, w2, [sp, #(4 * 0)] + ldp w3, w4, [sp, #(4 * 2)] + ldp w5, w6, [sp, #(4 * 4)] + ldr w7, [sp, #(4 * 6)] + + /* Free the stack space for the light ipc data. */ + add sp, sp, #(4 * 8) + + ret + +/* ams::kern::svc::CallSendSyncRequestLight64From32() */ +.section .text._ZN3ams4kern3svc32CallSendSyncRequestLight64From32Ev, "ax", %progbits +.global _ZN3ams4kern3svc32CallSendSyncRequestLight64From32Ev +.type _ZN3ams4kern3svc32CallSendSyncRequestLight64From32Ev, %function +_ZN3ams4kern3svc32CallSendSyncRequestLight64From32Ev: + /* Load x4-x7 from where the svc handler stores them. */ + ldp x4, x5, [sp, #(8 * 0)] + ldp x6, x7, [sp, #(8 * 2)] + + /* Allocate space for the light ipc data. */ + sub sp, sp, #(4 * 8) + + /* Store the light ipc data. */ + stp w1, w2, [sp, #(4 * 0)] + stp w3, w4, [sp, #(4 * 2)] + stp w5, w6, [sp, #(4 * 4)] + str w7, [sp, #(4 * 6)] + + /* Invoke the svc handler. */ + mov x1, sp + stp x29, x30, [sp, #-16]! + bl _ZN3ams4kern3svc28SendSyncRequestLight64From32EjPj + ldp x29, x30, [sp], #16 + + /* Load the light ipc data. 
*/ + ldp w1, w2, [sp, #(4 * 0)] + ldp w3, w4, [sp, #(4 * 2)] + ldp w5, w6, [sp, #(4 * 4)] + ldr w7, [sp, #(4 * 6)] + + /* Free the stack space for the light ipc data. */ + add sp, sp, #(4 * 8) + + /* Save x4-x7 to where the svc handler stores them. */ + stp x4, x5, [sp, #(8 * 0)] + stp x6, x7, [sp, #(8 * 2)] + + ret + + +/* ams::kern::svc::CallReplyAndReceiveLight64() */ +.section .text._ZN3ams4kern3svc26CallReplyAndReceiveLight64Ev, "ax", %progbits +.global _ZN3ams4kern3svc26CallReplyAndReceiveLight64Ev +.type _ZN3ams4kern3svc26CallReplyAndReceiveLight64Ev, %function +_ZN3ams4kern3svc26CallReplyAndReceiveLight64Ev: + /* Allocate space for the light ipc data. */ + sub sp, sp, #(4 * 8) + + /* Store the light ipc data. */ + stp w1, w2, [sp, #(4 * 0)] + stp w3, w4, [sp, #(4 * 2)] + stp w5, w6, [sp, #(4 * 4)] + str w7, [sp, #(4 * 6)] + + /* Invoke the svc handler. */ + mov x1, sp + stp x29, x30, [sp, #-16]! + bl _ZN3ams4kern3svc22ReplyAndReceiveLight64EjPj + ldp x29, x30, [sp], #16 + + /* Load the light ipc data. */ + ldp w1, w2, [sp, #(4 * 0)] + ldp w3, w4, [sp, #(4 * 2)] + ldp w5, w6, [sp, #(4 * 4)] + ldr w7, [sp, #(4 * 6)] + + /* Free the stack space for the light ipc data. */ + add sp, sp, #(4 * 8) + + ret + +/* ams::kern::svc::CallReplyAndReceiveLight64From32() */ +.section .text._ZN3ams4kern3svc32CallReplyAndReceiveLight64From32Ev, "ax", %progbits +.global _ZN3ams4kern3svc32CallReplyAndReceiveLight64From32Ev +.type _ZN3ams4kern3svc32CallReplyAndReceiveLight64From32Ev, %function +_ZN3ams4kern3svc32CallReplyAndReceiveLight64From32Ev: + /* Load x4-x7 from where the svc handler stores them. */ + ldp x4, x5, [sp, #(8 * 0)] + ldp x6, x7, [sp, #(8 * 2)] + + /* Allocate space for the light ipc data. */ + sub sp, sp, #(4 * 8) + + /* Store the light ipc data. */ + stp w1, w2, [sp, #(4 * 0)] + stp w3, w4, [sp, #(4 * 2)] + stp w5, w6, [sp, #(4 * 4)] + str w7, [sp, #(4 * 6)] + + /* Invoke the svc handler. */ + mov x1, sp + stp x29, x30, [sp, #-16]! + bl _ZN3ams4kern3svc28ReplyAndReceiveLight64From32EjPj + ldp x29, x30, [sp], #16 + + /* Load the light ipc data. */ + ldp w1, w2, [sp, #(4 * 0)] + ldp w3, w4, [sp, #(4 * 2)] + ldp w5, w6, [sp, #(4 * 4)] + ldr w7, [sp, #(4 * 6)] + + /* Free the stack space for the light ipc data. */ + add sp, sp, #(4 * 8) + + /* Save x4-x7 to where the svc handler stores them. */ + stp x4, x5, [sp, #(8 * 0)] + stp x6, x7, [sp, #(8 * 2)] + + ret diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp index 0455290e5..7f9397d6f 100644 --- a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp @@ -22,6 +22,20 @@ namespace ams::kern::svc { + /* Declare special prototypes for the light ipc handlers. */ + void CallSendSyncRequestLight64(); + void CallSendSyncRequestLight64From32(); + + void CallReplyAndReceiveLight64(); + void CallReplyAndReceiveLight64From32(); + + /* Declare special prototypes for ReturnFromException. */ + void CallReturnFromException64(); + void CallReturnFromException64From32(); + + /* Declare special prototype for (unsupported) CallCallSecureMonitor64From32. 
*/ + void CallCallSecureMonitor64From32(); + namespace { #ifndef MESOSPHERE_USE_STUBBED_SVC_TABLES @@ -30,7 +44,7 @@ namespace ams::kern::svc { private: \ using Impl = ::ams::svc::codegen::KernelSvcWrapper<::ams::kern::svc::NAME##64, ::ams::kern::svc::NAME##64From32>; \ public: \ - static NOINLINE void Call64() { return Impl::Call64(); } \ + static NOINLINE void Call64() { return Impl::Call64(); } \ static NOINLINE void Call64From32() { return Impl::Call64From32(); } \ }; #else @@ -51,28 +65,102 @@ namespace ams::kern::svc { #pragma GCC pop_options + constexpr const std::array SvcTable64From32Impl = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ + if (table[ID] == nullptr) { table[ID] = NAME::Call64From32; } + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) + #undef AMS_KERN_SVC_SET_TABLE_ENTRY + + table[svc::SvcId_SendSyncRequestLight] = CallSendSyncRequestLight64From32; + table[svc::SvcId_ReplyAndReceiveLight] = CallReplyAndReceiveLight64From32; + + table[svc::SvcId_ReturnFromException] = CallReturnFromException64From32; + + table[svc::SvcId_CallSecureMonitor] = CallCallSecureMonitor64From32; + + return table; + }(); + + constexpr const std::array SvcTable64Impl = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ + if (table[ID] == nullptr) { table[ID] = NAME::Call64; } + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) + #undef AMS_KERN_SVC_SET_TABLE_ENTRY + + table[svc::SvcId_SendSyncRequestLight] = CallSendSyncRequestLight64; + table[svc::SvcId_ReplyAndReceiveLight] = CallReplyAndReceiveLight64; + + table[svc::SvcId_ReturnFromException] = CallReturnFromException64; + + return table; + }(); + + constexpr bool IsValidSvcTable(const std::array &table) { + for (size_t i = 0; i < NumSupervisorCalls; i++) { + if (table[i] != nullptr) { + return true; + } + } + + return false; + } + + static_assert(IsValidSvcTable(SvcTable64Impl)); + static_assert(IsValidSvcTable(SvcTable64From32Impl)); + } - const std::array SvcTable64From32 = [] { - std::array table = {}; + constinit const std::array SvcTable64 = SvcTable64Impl; - #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ - table[ID] = NAME::Call64From32; - AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) - #undef AMS_KERN_SVC_SET_TABLE_ENTRY + constinit const std::array SvcTable64From32 = SvcTable64From32Impl; - return table; - }(); + void PatchSvcTableEntry(const SvcTableEntry *table, u32 id, SvcTableEntry entry); - const std::array SvcTable64 = [] { - std::array table = {}; + namespace { - #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ - table[ID] = NAME::Call64; - AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) - #undef AMS_KERN_SVC_SET_TABLE_ENTRY + /* NOTE: Although the SVC tables are constants, our global constructor will run before .rodata is protected R--. */ + class SvcTablePatcher { + private: + using SvcTable = std::array; + private: + static SvcTablePatcher s_instance; + private: + ALWAYS_INLINE const SvcTableEntry *GetTableData(const SvcTable *table) { + if (table != nullptr) { + return table->data(); + } else { + return nullptr; + } + } - return table; - }(); + NOINLINE void PatchTables(const SvcTableEntry *table_64, const SvcTableEntry *table_64_from_32) { + /* Get the target firmware. */ + const auto target_fw = kern::GetTargetFirmware(); + + /* 10.0.0 broke the ABI for QueryIoMapping. 
*/ + if (target_fw < TargetFirmware_10_0_0) { + if (table_64) { ::ams::kern::svc::PatchSvcTableEntry(table_64, svc::SvcId_QueryIoMapping, LegacyQueryIoMapping::Call64); } + if (table_64_from_32) { ::ams::kern::svc::PatchSvcTableEntry(table_64_from_32, svc::SvcId_QueryIoMapping, LegacyQueryIoMapping::Call64From32); } + } + + /* 3.0.0 broke the ABI for ContinueDebugEvent. */ + if (target_fw < TargetFirmware_3_0_0) { + if (table_64) { ::ams::kern::svc::PatchSvcTableEntry(table_64, svc::SvcId_ContinueDebugEvent, LegacyContinueDebugEvent::Call64); } + if (table_64_from_32) { ::ams::kern::svc::PatchSvcTableEntry(table_64_from_32, svc::SvcId_ContinueDebugEvent, LegacyContinueDebugEvent::Call64From32); } + } + } + public: + SvcTablePatcher(const SvcTable *table_64, const SvcTable *table_64_from_32) { + PatchTables(GetTableData(table_64), GetTableData(table_64_from_32)); + } + }; + + SvcTablePatcher SvcTablePatcher::s_instance(std::addressof(SvcTable64), std::addressof(SvcTable64From32)); + + } } diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp index 28eb097d3..4ada48c89 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp @@ -31,12 +31,18 @@ namespace ams::kern::board::nintendo::nx { constexpr size_t DeviceVirtualAddressBits = 34; constexpr size_t DeviceVirtualAddressMask = (1ul << DeviceVirtualAddressBits) - 1ul; - constexpr size_t DevicePageBits = 12; - constexpr size_t DevicePageSize = (1ul << DevicePageBits); - constexpr size_t DeviceLargePageBits = 22; - constexpr size_t DeviceLargePageSize = (1ul << DevicePageBits); + constexpr size_t DevicePageBits = 12; + constexpr size_t DevicePageSize = (1ul << DevicePageBits); static_assert(DevicePageSize == PageSize); + constexpr size_t DeviceLargePageBits = 22; + constexpr size_t DeviceLargePageSize = (1ul << DeviceLargePageBits); + static_assert(DeviceLargePageSize % DevicePageSize == 0); + + constexpr size_t DeviceRegionBits = 32; + constexpr size_t DeviceRegionSize = (1ul << DeviceRegionBits); + static_assert(DeviceRegionSize % DeviceLargePageSize == 0); + constexpr size_t DeviceAsidRegisterOffsets[] = { [ams::svc::DeviceName_Afi] = MC_SMMU_AFI_ASID, [ams::svc::DeviceName_Avpc] = MC_SMMU_AVPC_ASID, @@ -140,6 +146,57 @@ namespace ams::kern::board::nintendo::nx { return (static_cast(GetInteger(addr)) & ~PhysicalAddressMask) == 0; } + constexpr struct { u64 start; u64 end; } SmmuSupportedRanges[] = { + [ams::svc::DeviceName_Afi] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Avpc] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Dc] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Dcb] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Hc] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Hda] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Isp2] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_MsencNvenc] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Nv] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Nv2] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Ppcs] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Sata] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Vi] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Vic] = { 0x00000000ul, 0x0FFFFFFFFul }, + 
[ams::svc::DeviceName_XusbHost] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_XusbDev] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Tsec] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Ppcs1] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Dc1] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Sdmmc1a] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Sdmmc2a] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Sdmmc3a] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Sdmmc4a] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Isp2b] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Gpu] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Gpub] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Ppcs2] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Nvdec] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Ape] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Se] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Nvjpg] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Hc1] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Se1] = { 0x00000000ul, 0x0FFFFFFFFul }, + [ams::svc::DeviceName_Axiap] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Etr] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Tsecb] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Tsec1] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Tsecb1] = { 0x00000000ul, 0x3FFFFFFFFul }, + [ams::svc::DeviceName_Nvdec1] = { 0x00000000ul, 0x0FFFFFFFFul }, + }; + static_assert(util::size(SmmuSupportedRanges) == ams::svc::DeviceName_Count); + + constexpr bool IsAttachable(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) { + if (0 <= device_name && device_name < ams::svc::DeviceName_Count) { + const auto &range = SmmuSupportedRanges[device_name]; + return range.start <= space_address && (space_address + space_size - 1) <= range.end; + } + return false; + } + /* Types. */ class EntryBase { protected: @@ -186,7 +243,7 @@ namespace ams::kern::board::nintendo::nx { constexpr ALWAYS_INLINE u32 GetAttributes() const { return this->SelectBit(Bit_NonSecure) | this->SelectBit(Bit_Writeable) | this->SelectBit(Bit_Readable); } - constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() { return (static_cast(this->value) << DevicePageBits) & PhysicalAddressMask; } + constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { return (static_cast(this->value) << DevicePageBits) & PhysicalAddressMask; } ALWAYS_INLINE void Invalidate() { this->SetValue(0); } }; @@ -280,6 +337,8 @@ namespace ams::kern::board::nintendo::nx { KPhysicalAddress g_memory_controller_address; KPhysicalAddress g_reserved_table_phys_addr; KDeviceAsidManager g_asid_manager; + u32 g_saved_page_tables[AsidCount]; + u32 g_saved_asid_registers[ams::svc::DeviceName_Count]; /* Memory controller access functionality. 
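The SmmuSupportedRanges table above encodes, per device, how much device virtual address space the engine can address: entries ending at 0x0FFFFFFFF are limited to 32-bit addressing, while entries ending at 0x3FFFFFFFF can use the full 34-bit space. IsAttachable then simply checks that the requested window fits. Two worked examples against the table as written: IsAttachable(DeviceName_Nvdec, 0x100000000, 0x10000000) is false, because Nvdec's range tops out at 0xFFFFFFFF, while the same request for DeviceName_Gpu succeeds, since 0x100000000 + 0x10000000 - 1 = 0x10FFFFFFF still falls within 0x3FFFFFFFF.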
*/ void WriteMcRegister(size_t offset, u32 value) { @@ -298,12 +357,10 @@ namespace ams::kern::board::nintendo::nx { WriteMcRegister(MC_SMMU_PTC_FLUSH_0, 0); } -/* void InvalidatePtc(KPhysicalAddress address) { WriteMcRegister(MC_SMMU_PTC_FLUSH_1, (static_cast(GetInteger(address)) >> 32)); WriteMcRegister(MC_SMMU_PTC_FLUSH_0, (GetInteger(address) & 0xFFFFFFF0u) | 1u); } -*/ enum TlbFlushVaMatch : u32 { TlbFlushVaMatch_All = 0, @@ -323,11 +380,9 @@ namespace ams::kern::board::nintendo::nx { return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(true, asid, 0, TlbFlushVaMatch_All)); } -/* void InvalidateTlbSection(u8 asid, KDeviceVirtualAddress address) { return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(true, asid, address, TlbFlushVaMatch_Section)); } -*/ void SetTable(u8 asid, KPhysicalAddress address) { /* Write the table address. */ @@ -404,4 +459,700 @@ namespace ams::kern::board::nintendo::nx { /* TODO: Install interrupt handler. */ } + void KDevicePageTable::Lock() { + g_lock.Lock(); + } + + void KDevicePageTable::Unlock() { + g_lock.Unlock(); + } + + void KDevicePageTable::Sleep() { + /* Save all page tables. */ + for (size_t i = 0; i < AsidCount; ++i) { + WriteMcRegister(MC_SMMU_PTB_ASID, i); + SmmuSynchronizationBarrier(); + g_saved_page_tables[i] = ReadMcRegister(MC_SMMU_PTB_DATA); + } + + /* Save all asid registers. */ + for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) { + g_saved_asid_registers[i] = ReadMcRegister(GetDeviceAsidRegisterOffset(static_cast(i))); + } + } + + void KDevicePageTable::Wakeup() { + /* Synchronize. */ + InvalidatePtc(); + InvalidateTlb(); + SmmuSynchronizationBarrier(); + + /* Disable the SMMU */ + WriteMcRegister(MC_SMMU_CONFIG, 0); + + /* Restore the page tables. */ + for (size_t i = 0; i < AsidCount; ++i) { + WriteMcRegister(MC_SMMU_PTB_ASID, i); + SmmuSynchronizationBarrier(); + WriteMcRegister(MC_SMMU_PTB_DATA, g_saved_page_tables[i]); + } + SmmuSynchronizationBarrier(); + + /* Restore the asid registers. */ + for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) { + WriteMcRegister(GetDeviceAsidRegisterOffset(static_cast(i)), g_saved_asid_registers[i]); + SmmuSynchronizationBarrier(); + } + + /* Synchronize. */ + InvalidatePtc(); + InvalidateTlb(); + SmmuSynchronizationBarrier(); + + /* Enable the SMMU */ + WriteMcRegister(MC_SMMU_CONFIG, 1); + SmmuSynchronizationBarrier(); + } + + /* Member functions. */ + + Result KDevicePageTable::Initialize(u64 space_address, u64 space_size) { + /* Ensure space is valid. */ + R_UNLESS(((space_address + space_size - 1) & ~DeviceVirtualAddressMask) == 0, svc::ResultInvalidMemoryRegion()); + + /* Determine extents. */ + const size_t start_index = space_address / DeviceRegionSize; + const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize; + + /* Get the page table manager. */ + auto &ptm = Kernel::GetPageTableManager(); + + /* Clear the tables. */ + static_assert(TableCount == (1ul << DeviceVirtualAddressBits) / DeviceRegionSize); + for (size_t i = 0; i < TableCount; ++i) { + this->tables[i] = Null; + } + + /* Ensure that we clean up the tables on failure. */ + auto table_guard = SCOPE_GUARD { + for (size_t i = start_index; i <= end_index; ++i) { + if (this->tables[i] != Null && ptm.Close(this->tables[i], 1)) { + ptm.Free(this->tables[i]); + } + } + }; + + /* Allocate a table for all required indices. 
*/ + for (size_t i = start_index; i <= end_index; ++i) { + const KVirtualAddress table_vaddr = ptm.Allocate(); + R_UNLESS(table_vaddr != Null, svc::ResultOutOfMemory()); + + MESOSPHERE_ASSERT(IsValidPhysicalAddress(GetPageTablePhysicalAddress(table_vaddr))); + + ptm.Open(table_vaddr, 1); + cpu::StoreDataCache(GetVoidPointer(table_vaddr), PageDirectorySize); + this->tables[i] = table_vaddr; + } + + /* Clear asids. */ + for (size_t i = 0; i < TableCount; ++i) { + this->table_asids[i] = g_reserved_asid; + } + + /* Reserve asids for the tables. */ + R_TRY(g_asid_manager.Reserve(std::addressof(this->table_asids[start_index]), end_index - start_index + 1)); + + /* Associate tables with asids. */ + for (size_t i = start_index; i <= end_index; ++i) { + SetTable(this->table_asids[i], GetPageTablePhysicalAddress(this->tables[i])); + } + + /* Set member variables. */ + this->attached_device = 0; + this->attached_value = (1u << 31) | this->table_asids[0]; + this->detached_value = (1u << 31) | g_reserved_asid; + + this->hs_attached_value = (1u << 31); + this->hs_detached_value = (1u << 31); + for (size_t i = 0; i < TableCount; ++i) { + this->hs_attached_value |= (this->table_asids[i] << (i * BITSIZEOF(u8))); + this->hs_detached_value |= (g_reserved_asid << (i * BITSIZEOF(u8))); + } + + /* We succeeded. */ + table_guard.Cancel(); + return ResultSuccess(); + } + + void KDevicePageTable::Finalize() { + /* Get the page table manager. */ + auto &ptm = Kernel::GetPageTableManager(); + + /* Detach from all devices. */ + { + KScopedLightLock lk(g_lock); + for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) { + const auto device_name = static_cast(i); + if ((this->attached_device & (1ul << device_name)) != 0) { + WriteMcRegister(GetDeviceAsidRegisterOffset(device_name), IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value); + SmmuSynchronizationBarrier(); + } + } + } + + /* Forcibly unmap all pages. */ + this->UnmapImpl(0, (1ul << DeviceVirtualAddressBits), true); + + /* Release all asids. */ + for (size_t i = 0; i < TableCount; ++i) { + if (this->table_asids[i] != g_reserved_asid) { + /* Set the table to the reserved table. */ + SetTable(this->table_asids[i], g_reserved_table_phys_addr); + + /* Close the table. */ + const KVirtualAddress table_vaddr = this->tables[i]; + MESOSPHERE_ASSERT(ptm.GetRefCount(table_vaddr) == 1); + MESOSPHERE_ABORT_UNLESS(ptm.Close(table_vaddr, 1)); + + /* Free the table. */ + ptm.Free(table_vaddr); + + /* Release the asid. */ + g_asid_manager.Release(this->table_asids[i]); + } + } + } + + Result KDevicePageTable::Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) { + /* Validate the device name. */ + R_UNLESS(0 <= device_name, svc::ResultNotFound()); + R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound()); + + /* Check that the device isn't already attached. */ + R_UNLESS((this->attached_device & (1ul << device_name)) == 0, svc::ResultBusy()); + + /* Validate that the space is allowed for the device. */ + const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize; + R_UNLESS(end_index == 0 || IsHsSupported(device_name), svc::ResultInvalidCombination()); + + /* Validate that the device can be attached. */ + R_UNLESS(IsAttachable(device_name, space_address, space_size), svc::ResultInvalidCombination()); + + /* Get the device asid register offset. 
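For reference, a hypothetical helper (not in the patch; `MakeAsidValue`, `MakeHsAsidValue` and `SmmuEnableBit` are assumed names) showing how the attached/detached register values built above are laid out: bit 31 enables translation, ordinary clients carry a single asid in the low byte, and clients the code treats as "hs" pack one asid byte per 4 GB region.

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

constexpr uint32_t SmmuEnableBit = 1u << 31;

/* Ordinary clients: enable bit plus the asid of the single table they can address. */
constexpr uint32_t MakeAsidValue(uint8_t asid) {
    return SmmuEnableBit | asid;
}

/* "hs" clients: one asid byte per 4 GB region, lowest region in the lowest byte. */
constexpr uint32_t MakeHsAsidValue(const std::array<uint8_t, 4> &asids) {
    uint32_t value = SmmuEnableBit;
    for (size_t i = 0; i < asids.size(); ++i) {
        value |= static_cast<uint32_t>(asids[i]) << (i * 8);
    }
    return value;
}

static_assert(MakeAsidValue(7) == (SmmuEnableBit | 0x07u));
static_assert(MakeHsAsidValue({{1, 2, 3, 4}}) == (SmmuEnableBit | 0x04030201u));
```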
*/ + const int reg_offset = GetDeviceAsidRegisterOffset(device_name); + R_UNLESS(reg_offset >= 0, svc::ResultNotFound()); + + /* Determine the old/new values. */ + const u32 old_val = IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value; + const u32 new_val = IsHsSupported(device_name) ? this->hs_attached_value : this->attached_value; + + /* Attach the device. */ + { + KScopedLightLock lk(g_lock); + + /* Validate that the device is unclaimed. */ + R_UNLESS((ReadMcRegister(reg_offset) | (1u << 31)) == (old_val | (1u << 31)), svc::ResultBusy()); + + /* Claim the device. */ + WriteMcRegister(reg_offset, new_val); + SmmuSynchronizationBarrier(); + + /* Ensure that we claimed it successfully. */ + if (ReadMcRegister(reg_offset) != new_val) { + WriteMcRegister(reg_offset, old_val); + SmmuSynchronizationBarrier(); + return svc::ResultNotFound(); + } + } + + /* Mark the device as attached. */ + this->attached_device |= (1ul << device_name); + + return ResultSuccess(); + } + + Result KDevicePageTable::Detach(ams::svc::DeviceName device_name) { + /* Validate the device name. */ + R_UNLESS(0 <= device_name, svc::ResultNotFound()); + R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound()); + + /* Check that the device is already attached. */ + R_UNLESS((this->attached_device & (1ul << device_name)) != 0, svc::ResultInvalidState()); + + /* Get the device asid register offset. */ + const int reg_offset = GetDeviceAsidRegisterOffset(device_name); + R_UNLESS(reg_offset >= 0, svc::ResultNotFound()); + + /* Determine the old/new values. */ + const u32 old_val = IsHsSupported(device_name) ? this->hs_attached_value : this->attached_value; + const u32 new_val = IsHsSupported(device_name) ? this->hs_detached_value : this->detached_value; + + /* When not building for debug, the old value might be unused. */ + AMS_UNUSED(old_val); + + /* Detach the device. */ + { + KScopedLightLock lk(g_lock); + + /* Check that the device is attached. */ + MESOSPHERE_ASSERT(ReadMcRegister(reg_offset) == old_val); + + /* Release the device. */ + WriteMcRegister(reg_offset, new_val); + SmmuSynchronizationBarrier(); + + /* Check that the device was released. */ + MESOSPHERE_ASSERT((ReadMcRegister(reg_offset) | (1u << 31)) == (new_val | 1u << 31)); + } + + /* Mark the device as detached. */ + this->attached_device &= ~(1ul << device_name); + + return ResultSuccess(); + } + + bool KDevicePageTable::IsFree(KDeviceVirtualAddress address, u64 size) const { + MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0); + MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0); + + /* Walk the directory, looking for entries. 
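The attach path above follows a claim/verify/roll-back shape: the asid register is only taken over if it still holds the detached value, the write is read back to confirm the hardware (behind the secure monitor) accepted it, and the old value is restored on failure. A minimal sketch of that shape, using a fake register type (`FakeAsidRegister` and `TryClaim` are illustrative names only):

```cpp
#include <cstdint>

struct FakeAsidRegister {
    uint32_t value;
    uint32_t Read() const { return value; }
    void Write(uint32_t v) { value = v; }
};

bool TryClaim(FakeAsidRegister &reg, uint32_t detached_value, uint32_t attached_value) {
    /* Ignore the enable bit (bit 31) when checking the current owner, as Attach() does. */
    constexpr uint32_t EnableBit = 1u << 31;
    if ((reg.Read() | EnableBit) != (detached_value | EnableBit)) {
        return false; /* Some other page table already owns the device. */
    }

    /* Claim the device, then confirm the write actually took effect. */
    reg.Write(attached_value);
    if (reg.Read() != attached_value) {
        reg.Write(detached_value); /* Roll back on failure. */
        return false;
    }
    return true;
}
```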
*/ + u64 remaining = size; + while (remaining > 0) { + const size_t l0_index = (address / DeviceRegionSize); + const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize; + const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; + + const PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + if (l1 == nullptr || !l1[l1_index].IsValid()) { + const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index; + const size_t map_count = std::min(remaining_in_entry, remaining / DevicePageSize); + + address += DevicePageSize * map_count; + remaining -= DevicePageSize * map_count; + } else if (l1[l1_index].IsTable()) { + const PageTableEntry *l2 = GetPointer(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress())); + + const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index; + const size_t map_count = std::min(remaining_in_entry, remaining / DevicePageSize); + + for (size_t i = 0; i < map_count; ++i) { + if (l2[l2_index + i].IsValid()) { + return false; + } + } + + address += DevicePageSize * map_count; + remaining -= DevicePageSize * map_count; + } else { + /* If we have an entry, we're not free. */ + return false; + } + } + + return true; + } + + Result KDevicePageTable::MapDevicePage(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm) { + /* Clear the output size. */ + *out_mapped_size = 0; + + /* Ensure that the physical address is valid. */ + R_UNLESS(IsValidPhysicalAddress(static_cast(GetInteger(phys_addr)) + size - 1), svc::ResultInvalidCurrentMemory()); + MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0); + MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0); + + /* Get the memory manager and page table manager. */ + KMemoryManager &mm = Kernel::GetMemoryManager(); + KPageTableManager &ptm = Kernel::GetPageTableManager(); + + /* Cache permissions. */ + const bool read = (device_perm & ams::svc::MemoryPermission_Read) != 0; + const bool write = (device_perm & ams::svc::MemoryPermission_Write) != 0; + + /* Walk the directory. */ + u64 remaining = size; + while (remaining > 0) { + const size_t l0_index = (address / DeviceRegionSize); + const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize; + const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; + + /* Get and validate l1. */ + PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + MESOSPHERE_ASSERT(l1 != nullptr); + + /* Setup an l1 table/entry, if needed. */ + if (!l1[l1_index].IsTable()) { + /* Check that an entry doesn't already exist. */ + MESOSPHERE_ASSERT(!l1[l1_index].IsValid()); + + /* If we can make an l1 entry, do so. */ + if (l2_index == 0 && util::IsAligned(GetInteger(phys_addr), DeviceLargePageSize) && remaining >= DeviceLargePageSize) { + /* Set the large page. */ + l1[l1_index].SetLargePage(read, write, true, phys_addr); + cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry)); + + /* Synchronize. */ + InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); + InvalidateTlbSection(this->table_asids[l0_index], address); + SmmuSynchronizationBarrier(); + + /* Open references to the pages. */ + mm.Open(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize); + + /* Advance. 
*/ + phys_addr += DeviceLargePageSize; + address += DeviceLargePageSize; + *out_mapped_size += DeviceLargePageSize; + remaining -= DeviceLargePageSize; + continue; + } else if (num_pt == max_pt) { + break; + } else { + /* Make an l1 table. */ + const KVirtualAddress table_vaddr = ptm.Allocate(); + R_UNLESS(table_vaddr != Null, svc::ResultOutOfMemory()); + MESOSPHERE_ASSERT(IsValidPhysicalAddress(GetPageTablePhysicalAddress(table_vaddr))); + cpu::StoreDataCache(GetVoidPointer(table_vaddr), PageTableSize); + + /* Set the l1 table. */ + l1[l1_index].SetTable(true, true, true, GetPageTablePhysicalAddress(table_vaddr)); + cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry)); + + /* Synchronize. */ + InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); + InvalidateTlbSection(this->table_asids[l0_index], address); + SmmuSynchronizationBarrier(); + + /* Increment the page table count. */ + ++num_pt; + } + } + + /* If we get to this point, l1 must be a table. */ + MESOSPHERE_ASSERT(l1[l1_index].IsTable()); + + /* Map l2 entries. */ + { + PageTableEntry *l2 = GetPointer(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress())); + + const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index; + const size_t map_count = std::min(remaining_in_entry, remaining / DevicePageSize); + + /* Set the entries. */ + for (size_t i = 0; i < map_count; ++i) { + MESOSPHERE_ASSERT(!l2[l2_index + i].IsValid()); + l2[l2_index + i].SetPage(read, write, true, phys_addr + DevicePageSize * i); + + /* Add a reference to the l2 page (from the l2 entry page). */ + ptm.Open(KVirtualAddress(l2), 1); + } + cpu::StoreDataCache(std::addressof(l2[l2_index]), map_count * sizeof(PageTableEntry)); + + /* Invalidate the page table cache. */ + for (size_t i = util::AlignDown(l2_index, 4); i <= util::AlignDown(l2_index + map_count - 1, 4); i += 4) { + InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l2[i])))); + } + + /* Synchronize. */ + InvalidateTlbSection(this->table_asids[l0_index], address); + SmmuSynchronizationBarrier(); + + /* Open references to the pages. */ + mm.Open(GetHeapVirtualAddress(phys_addr), (map_count * DevicePageSize) / PageSize); + + /* Advance. */ + phys_addr += map_count * DevicePageSize; + address += map_count * DevicePageSize; + *out_mapped_size += map_count * DevicePageSize; + remaining -= map_count * DevicePageSize; + } + } + + return ResultSuccess(); + } + + Result KDevicePageTable::MapImpl(size_t *out_mapped_size, s32 &num_pt, s32 max_pt, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm) { + /* Clear the output size. */ + *out_mapped_size = 0; + + /* Get the size, and validate the address. */ + const u64 size = pg.GetNumPages() * PageSize; + MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0); + MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0); + + /* Ensure that the region we're mapping to is free. */ + R_UNLESS(this->IsFree(device_address, size), svc::ResultInvalidCurrentMemory()); + + /* Ensure that if we fail, we unmap anything we mapped. */ + auto unmap_guard = SCOPE_GUARD { this->UnmapImpl(device_address, size, true); }; + + /* Iterate, mapping device pages. */ + KDeviceVirtualAddress cur_addr = device_address; + for (auto it = pg.begin(); it != pg.end(); ++it) { + /* Require that we be able to map the device page. 
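A small sketch (illustrative only; `CanUseLargePage` is an assumed name) of the granularity decision MapDevicePage makes above: a 4 MB l1 large page is used only when the device address sits at a large-page boundary (l2 index 0), the physical address is 4 MB aligned, and at least 4 MB remains to be mapped; otherwise an l2 table of 4 KB entries is used, subject to the page-table budget.

```cpp
#include <cstdint>

constexpr uint64_t DevicePageSize      = 1ull << 12;
constexpr uint64_t DeviceLargePageSize = 1ull << 22;

constexpr bool CanUseLargePage(uint64_t device_address, uint64_t phys_address, uint64_t remaining) {
    const bool at_l1_boundary   = (device_address % DeviceLargePageSize) == 0; /* l2_index == 0 */
    const bool phys_aligned     = (phys_address   % DeviceLargePageSize) == 0;
    const bool enough_remaining = remaining >= DeviceLargePageSize;
    return at_l1_boundary && phys_aligned && enough_remaining;
}

static_assert( CanUseLargePage(0x00400000, 0x80400000, 0x400000));
static_assert(!CanUseLargePage(0x00401000, 0x80400000, 0x400000)); /* mid large page   */
static_assert(!CanUseLargePage(0x00400000, 0x80001000, 0x400000)); /* phys unaligned   */
static_assert(!CanUseLargePage(0x00400000, 0x80400000, 0x200000)); /* too little left  */
```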
*/ + R_UNLESS(IsHeapVirtualAddress(it->GetAddress()), svc::ResultInvalidCurrentMemory()); + + /* Get the physical address for the page. */ + const KPhysicalAddress phys_addr = GetHeapPhysicalAddress(it->GetAddress()); + + /* Map the device page. */ + const u64 block_size = it->GetSize(); + size_t mapped_size = 0; + R_TRY(this->MapDevicePage(std::addressof(mapped_size), num_pt, max_pt, phys_addr, block_size, cur_addr, device_perm)); + + /* Advance. */ + cur_addr += block_size; + *out_mapped_size += mapped_size; + + /* If we didn't map as much as we wanted, break. */ + if (mapped_size < block_size) { + break; + } + } + + /* We're done, so cancel our guard. */ + unmap_guard.Cancel(); + + return ResultSuccess(); + } + + void KDevicePageTable::UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force) { + MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0); + MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0); + + /* Get the memory manager and page table manager. */ + KMemoryManager &mm = Kernel::GetMemoryManager(); + KPageTableManager &ptm = Kernel::GetPageTableManager(); + + /* Make a page group for the pages we're closing. */ + KPageGroup pg(std::addressof(Kernel::GetBlockInfoManager())); + + /* Walk the directory. */ + u64 remaining = size; + while (remaining > 0) { + const size_t l0_index = (address / DeviceRegionSize); + const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize; + const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; + + /* Get and validate l1. */ + PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + + /* Check if there's nothing mapped at l1. */ + if (l1 == nullptr || !l1[l1_index].IsValid()) { + MESOSPHERE_ASSERT(force); + + const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index; + const size_t map_count = std::min(remaining_in_entry, remaining / DevicePageSize); + + /* Advance. */ + address += map_count * DevicePageSize; + remaining -= map_count * DevicePageSize; + } else if (l1[l1_index].IsTable()) { + /* Dealing with an l1 table. */ + PageTableEntry *l2 = GetPointer(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress())); + + const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index; + const size_t map_count = std::min(remaining_in_entry, remaining / DevicePageSize); + size_t num_closed = 0; + bool invalidated_tlb = false; + + for (size_t i = 0; i < map_count; ++i) { + if (l2[l2_index + i].IsValid()) { + /* Get the physical address. */ + const KPhysicalAddress phys_addr = l2[l2_index + i].GetPhysicalAddress(); + MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr)); + + /* Invalidate the entry. */ + l2[l2_index + i].Invalidate(); + ++num_closed; + + /* Try to add the page to the group. */ + if (R_FAILED(pg.AddBlock(GetHeapVirtualAddress(phys_addr), DevicePageSize / PageSize))) { + /* If we can't add it for deferred close, close it now. */ + cpu::StoreDataCache(std::addressof(l2[l2_index + i]), sizeof(PageTableEntry)); + InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l2[l2_index + i])))); + SmmuSynchronizationBarrier(); + + /* Close the page's reference. */ + mm.Close(GetHeapVirtualAddress(phys_addr), 1); + } + } else { + MESOSPHERE_ASSERT(force); + } + } + cpu::StoreDataCache(std::addressof(l2[l2_index]), map_count * sizeof(PageTableEntry)); + + /* Invalidate the page table cache. 
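Worked example (not kernel code; `FlushedGroups` is a made-up name) of the PTC flush loops used after writing l2 entries: assuming the SMMU's page-table cache operates on 16-byte lines, i.e. groups of four 4-byte entries, as the stride of 4 suggests, the loop flushes each group touched by the write exactly once.

```cpp
#include <cstddef>
#include <vector>

constexpr size_t AlignDown(size_t value, size_t align) {
    return value - (value % align);
}

std::vector<size_t> FlushedGroups(size_t l2_index, size_t map_count) {
    std::vector<size_t> groups;
    for (size_t i = AlignDown(l2_index, 4); i <= AlignDown(l2_index + map_count - 1, 4); i += 4) {
        groups.push_back(i); /* one InvalidatePtc() per group of four entries */
    }
    return groups;
}

/* FlushedGroups(6, 5) covers entries 6..10 and returns {4, 8}: two flushes, no duplicates. */
```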
*/ + for (size_t i = util::AlignDown(l2_index, 4); i <= util::AlignDown(l2_index + map_count - 1, 4); i += 4) { + InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l2[i])))); + } + SmmuSynchronizationBarrier(); + + /* Close the pages. */ + if (ptm.Close(KVirtualAddress(l2), num_closed)) { + /* Invalidate the l1 entry. */ + l1[l1_index].Invalidate(); + cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry)); + + /* Synchronize. */ + InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); + InvalidateTlbSection(this->table_asids[l0_index], address); + SmmuSynchronizationBarrier(); + + /* We invalidated the tlb. */ + invalidated_tlb = true; + + /* Free the l2 page. */ + ptm.Free(KVirtualAddress(l2)); + } + + /* Invalidate the tlb if we haven't already. */ + if (!invalidated_tlb) { + InvalidateTlbSection(this->table_asids[l0_index], address); + SmmuSynchronizationBarrier(); + } + + /* Advance. */ + address += map_count * DevicePageSize; + remaining -= map_count * DevicePageSize; + } else { + /* Dealing with an l1 entry. */ + MESOSPHERE_ASSERT(l2_index == 0); + + /* Get the physical address. */ + const KPhysicalAddress phys_addr = l1[l1_index].GetPhysicalAddress(); + MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr)); + + /* Invalidate the entry. */ + l1[l1_index].Invalidate(); + cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry)); + + /* Synchronize. */ + InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index])))); + InvalidateTlbSection(this->table_asids[l0_index], address); + SmmuSynchronizationBarrier(); + + /* Close references. */ + mm.Close(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize); + + /* Advance. */ + address += DeviceLargePageSize; + remaining -= DeviceLargePageSize; + } + } + + /* Close references to the pages in the group. */ + pg.Close(); + } + + Result KDevicePageTable::MakePageGroup(KPageGroup *out, KDeviceVirtualAddress address, u64 size) const { + MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0); + MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0); + + /* Walk the directory. */ + u64 remaining = size; + bool first = true; + u32 attr = 0; + while (remaining > 0) { + const size_t l0_index = (address / DeviceRegionSize); + const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize; + const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize; + + /* Get and validate l1. */ + const PageDirectoryEntry *l1 = GetPointer(this->tables[l0_index]); + R_UNLESS(l1 != nullptr, svc::ResultInvalidCurrentMemory()); + R_UNLESS(l1[l1_index].IsValid(), svc::ResultInvalidCurrentMemory()); + + if (l1[l1_index].IsTable()) { + /* We're acting on an l2 entry. */ + const PageTableEntry *l2 = GetPointer(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress())); + + const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index; + const size_t map_count = std::min(remaining_in_entry, remaining / DevicePageSize); + + for (size_t i = 0; i < map_count; ++i) { + /* Ensure the l2 entry is valid. */ + R_UNLESS(l2[l2_index + i].IsValid(), svc::ResultInvalidCurrentMemory()); + + /* Get the physical address. */ + const KPhysicalAddress phys_addr = l2[l2_index + i].GetPhysicalAddress(); + MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr)); + + /* Add to the group. 
*/ + R_TRY(out->AddBlock(GetHeapVirtualAddress(phys_addr), DevicePageSize / PageSize)); + + /* If this is our first entry, get the attribute. */ + if (first) { + attr = l2[l2_index + i].GetAttributes(); + first = false; + } else { + /* Validate the attributes match the first entry. */ + R_UNLESS(l2[l2_index + i].GetAttributes() == attr, svc::ResultInvalidCurrentMemory()); + } + } + + /* Advance. */ + address += DevicePageSize * map_count; + remaining -= DevicePageSize * map_count; + } else { + /* We're acting on an l1 entry. */ + R_UNLESS(l2_index == 0, svc::ResultInvalidCurrentMemory()); + R_UNLESS(remaining >= DeviceLargePageSize, svc::ResultInvalidCurrentMemory()); + + /* Get the physical address. */ + const KPhysicalAddress phys_addr = l1[l1_index].GetPhysicalAddress(); + MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr)); + + /* Add to the group. */ + R_TRY(out->AddBlock(GetHeapVirtualAddress(phys_addr), DeviceLargePageSize / PageSize)); + + /* If this is our first entry, get the attribute. */ + if (first) { + attr = l1[l1_index].GetAttributes(); + first = false; + } else { + /* Validate the attributes match the first entry. */ + R_UNLESS(l1[l1_index].GetAttributes() == attr, svc::ResultInvalidCurrentMemory()); + } + + /* Advance. */ + address += DeviceLargePageSize; + remaining -= DeviceLargePageSize; + } + } + + return ResultSuccess(); + } + + bool KDevicePageTable::Compare(const KPageGroup &compare_pg, KDeviceVirtualAddress device_address) const { + /* Check whether the page group we expect for the virtual address matches the page group we're validating. */ + KPageGroup calc_pg(std::addressof(Kernel::GetBlockInfoManager())); + return (R_SUCCEEDED(this->MakePageGroup(std::addressof(calc_pg), device_address, compare_pg.GetNumPages() * PageSize))) && + calc_pg.IsEquivalentTo(compare_pg); + } + + Result KDevicePageTable::Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) { + /* Clear the output size. */ + *out_mapped_size = 0; + + /* Map the pages. */ + s32 num_pt = 0; + return this->MapImpl(out_mapped_size, num_pt, refresh_mappings ? 1 : std::numeric_limits::max(), pg, device_address, device_perm); + } + + Result KDevicePageTable::Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address) { + /* Validate address/size. */ + const size_t size = pg.GetNumPages() * PageSize; + MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0); + MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0); + + /* Ensure the page group is correct. */ + R_UNLESS(this->Compare(pg, device_address), svc::ResultInvalidCurrentMemory()); + + /* Unmap the pages. */ + this->UnmapImpl(device_address, size, false); + + return ResultSuccess(); + } + } diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp index 3846b1752..0e3f4f602 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp @@ -50,7 +50,7 @@ namespace ams::kern::board::nintendo::nx { u64 pmcr_el0; u64 pmevcntrN_el0[31]; u64 pmevtyperN_el0[31]; - u64 pmcntenset_el1; + u64 pmintenset_el1; u64 pmovsset_el0; u64 pmselr_el0; u64 pmuserenr_el0; @@ -62,13 +62,367 @@ namespace ams::kern::board::nintendo::nx { constexpr s32 SleepManagerThreadPriority = 2; /* Globals for sleep/wake. 
*/ - u64 g_sleep_target_cores; - KLightLock g_request_lock; - KLightLock g_cv_lock; - KLightConditionVariable g_cv; - KPhysicalAddress g_sleep_buffer_phys_addrs[cpu::NumCores]; - alignas(16) u64 g_sleep_buffers[cpu::NumCores][1_KB / sizeof(u64)]; - SavedSystemRegisters g_sleep_system_registers[cpu::NumCores] = {}; + constinit u64 g_sleep_target_cores; + constinit KLightLock g_request_lock; + constinit KLightLock g_cv_lock; + constinit KLightConditionVariable g_cv; + constinit KPhysicalAddress g_sleep_buffer_phys_addrs[cpu::NumCores]; + alignas(1_KB) constinit u64 g_sleep_buffers[cpu::NumCores][1_KB / sizeof(u64)]; + constinit SavedSystemRegisters g_sleep_system_registers[cpu::NumCores] = {}; + + void PowerOnCpu(int core_id, KPhysicalAddress entry_phys_addr, u64 context_id) { + /* Request the secure monitor power on the core. */ + smc::CpuOn(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id); + } + + void WaitOtherCpuPowerOff() { + constexpr u64 PmcPhysicalAddress = 0x7000E400; + constexpr u64 APBDEV_PMC_PWRGATE_STATUS = PmcPhysicalAddress + 0x38; + + constexpr u32 PWRGATE_STATUS_CE123_MASK = ((1u << 3) - 1) << 9; + + u32 value; + do { + bool res = smc::ReadWriteRegister(std::addressof(value), APBDEV_PMC_PWRGATE_STATUS, 0, 0); + MESOSPHERE_ASSERT(res); + } while ((value & PWRGATE_STATUS_CE123_MASK) != 0); + } + + void SavedSystemRegisters::Save() { + /* Save system registers. */ + this->ttbr0_el1 = cpu::GetTtbr0El1(); + this->tcr_el1 = cpu::GetTcrEl1(); + this->tpidr_el0 = cpu::GetTpidrEl0(); + this->elr_el1 = cpu::GetElrEl1(); + this->sp_el0 = cpu::GetSpEl0(); + this->spsr_el1 = cpu::GetSpsrEl1(); + this->daif = cpu::GetDaif(); + this->cpacr_el1 = cpu::GetCpacrEl1(); + this->vbar_el1 = cpu::GetVbarEl1(); + this->csselr_el1 = cpu::GetCsselrEl1(); + this->cntp_ctl_el0 = cpu::GetCntpCtlEl0(); + this->cntp_cval_el0 = cpu::GetCntpCvalEl0(); + this->cntkctl_el1 = cpu::GetCntkCtlEl1(); + this->tpidrro_el0 = cpu::GetTpidrRoEl0(); + + /* Save pmu registers. */ + { + /* Get and clear pmcr_el0 */ + this->pmcr_el0 = cpu::GetPmcrEl0(); + cpu::SetPmcrEl0(0); + cpu::EnsureInstructionConsistency(); + + /* Save other pmu registers. 
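Worked value (illustrative; the mapping of bits 9-11 to the CE1-CE3 power partitions is an assumption about the PMC layout, consistent with the mask's name): WaitOtherCpuPowerOff spins until all three secondary-core bits of APBDEV_PMC_PWRGATE_STATUS read zero before core 0 proceeds into suspend.

```cpp
#include <cstdint>

constexpr uint32_t PWRGATE_STATUS_CE123_MASK = ((1u << 3) - 1) << 9;
static_assert(PWRGATE_STATUS_CE123_MASK == 0x00000E00); /* bits 9, 10, 11 */

constexpr bool AllSecondaryCoresOff(uint32_t pwrgate_status) {
    return (pwrgate_status & PWRGATE_STATUS_CE123_MASK) == 0;
}

static_assert( AllSecondaryCoresOff(0x00000000));
static_assert(!AllSecondaryCoresOff(0x00000200)); /* a secondary core is still powered */
```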
*/ + this->pmuserenr_el0 = cpu::GetPmUserEnrEl0(); + this->pmselr_el0 = cpu::GetPmSelrEl0(); + this->pmccfiltr_el0 = cpu::GetPmcCfiltrEl0(); + this->pmcntenset_el0 = cpu::GetPmCntEnSetEl0(); + this->pmintenset_el1 = cpu::GetPmIntEnSetEl1(); + this->pmovsset_el0 = cpu::GetPmOvsSetEl0(); + this->pmccntr_el0 = cpu::GetPmcCntrEl0(); + + switch (cpu::PerformanceMonitorsControlRegisterAccessor(this->pmcr_el0).GetN()) { + #define HANDLE_PMU_CASE(N) \ + case (N+1): \ + this->pmevcntrN_el0 [ N ] = cpu::GetPmevCntr##N##El0(); \ + this->pmevtyperN_el0[ N ] = cpu::GetPmevTyper##N##El0(); \ + [[fallthrough]] + + HANDLE_PMU_CASE(30); + HANDLE_PMU_CASE(29); + HANDLE_PMU_CASE(28); + HANDLE_PMU_CASE(27); + HANDLE_PMU_CASE(26); + HANDLE_PMU_CASE(25); + HANDLE_PMU_CASE(24); + HANDLE_PMU_CASE(23); + HANDLE_PMU_CASE(22); + HANDLE_PMU_CASE(21); + HANDLE_PMU_CASE(20); + HANDLE_PMU_CASE(19); + HANDLE_PMU_CASE(18); + HANDLE_PMU_CASE(17); + HANDLE_PMU_CASE(16); + HANDLE_PMU_CASE(15); + HANDLE_PMU_CASE(14); + HANDLE_PMU_CASE(13); + HANDLE_PMU_CASE(12); + HANDLE_PMU_CASE(11); + HANDLE_PMU_CASE(10); + HANDLE_PMU_CASE( 9); + HANDLE_PMU_CASE( 8); + HANDLE_PMU_CASE( 7); + HANDLE_PMU_CASE( 6); + HANDLE_PMU_CASE( 5); + HANDLE_PMU_CASE( 4); + HANDLE_PMU_CASE( 3); + HANDLE_PMU_CASE( 2); + HANDLE_PMU_CASE( 1); + HANDLE_PMU_CASE( 0); + + #undef HANDLE_PMU_CASE + case 0: + default: + break; + } + } + + /* Save debug registers. */ + const u64 dfr0 = cpu::GetIdAa64Dfr0El1(); + + this->mdscr_el1 = cpu::GetMdscrEl1(); + this->contextidr_el1 = cpu::GetContextidrEl1(); + + /* Save watchpoints. */ + switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumWatchpoints()) { + #define HANDLE_DBG_CASE(N) \ + case N: \ + this->dbgwcrN_el1[ N ] = cpu::GetDbgWcr##N##El1(); \ + this->dbgwvrN_el1[ N ] = cpu::GetDbgWvr##N##El1(); \ + [[fallthrough]] + + HANDLE_DBG_CASE(15); + HANDLE_DBG_CASE(14); + HANDLE_DBG_CASE(13); + HANDLE_DBG_CASE(12); + HANDLE_DBG_CASE(11); + HANDLE_DBG_CASE(10); + HANDLE_DBG_CASE( 9); + HANDLE_DBG_CASE( 8); + HANDLE_DBG_CASE( 7); + HANDLE_DBG_CASE( 6); + HANDLE_DBG_CASE( 5); + HANDLE_DBG_CASE( 4); + HANDLE_DBG_CASE( 3); + HANDLE_DBG_CASE( 2); + + #undef HANDLE_DBG_CASE + + case 1: + this->dbgwcrN_el1[1] = cpu::GetDbgWcr1El1(); + this->dbgwvrN_el1[1] = cpu::GetDbgWvr1El1(); + this->dbgwcrN_el1[0] = cpu::GetDbgWcr0El1(); + this->dbgwvrN_el1[0] = cpu::GetDbgWvr0El1(); + [[fallthrough]]; + default: + break; + } + + /* Save breakpoints. */ + switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumBreakpoints()) { + #define HANDLE_DBG_CASE(N) \ + case N: \ + this->dbgbcrN_el1[ N ] = cpu::GetDbgBcr##N##El1(); \ + this->dbgbvrN_el1[ N ] = cpu::GetDbgBvr##N##El1(); \ + [[fallthrough]] + + HANDLE_DBG_CASE(15); + HANDLE_DBG_CASE(14); + HANDLE_DBG_CASE(13); + HANDLE_DBG_CASE(12); + HANDLE_DBG_CASE(11); + HANDLE_DBG_CASE(10); + HANDLE_DBG_CASE( 9); + HANDLE_DBG_CASE( 8); + HANDLE_DBG_CASE( 7); + HANDLE_DBG_CASE( 6); + HANDLE_DBG_CASE( 5); + HANDLE_DBG_CASE( 4); + HANDLE_DBG_CASE( 3); + HANDLE_DBG_CASE( 2); + + #undef HANDLE_DBG_CASE + + case 1: + this->dbgbcrN_el1[1] = cpu::GetDbgBcr1El1(); + this->dbgbvrN_el1[1] = cpu::GetDbgBvr1El1(); + [[fallthrough]]; + default: + break; + } + + this->dbgbcrN_el1[0] = cpu::GetDbgBcr0El1(); + this->dbgbvrN_el1[0] = cpu::GetDbgBvr0El1(); + cpu::EnsureInstructionConsistency(); + + /* Clear mdscr_el1. */ + cpu::SetMdscrEl1(0); + cpu::EnsureInstructionConsistency(); + } + + void SavedSystemRegisters::Restore() const { + /* Restore debug registers. 
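The HANDLE_PMU_CASE/HANDLE_DBG_CASE switches above use descending case labels with explicit fallthrough, so that a switch on the implemented count N visits exactly registers N-1 down to 0 without needing a runtime-indexed system-register accessor. A reduced sketch of the pattern (`SaveCounters` is an illustrative name):

```cpp
#include <cstddef>

void SaveCounters(bool (&saved)[4], size_t num_counters) {
    switch (num_counters) {
        case 4: saved[3] = true; [[fallthrough]];
        case 3: saved[2] = true; [[fallthrough]];
        case 2: saved[1] = true; [[fallthrough]];
        case 1: saved[0] = true; [[fallthrough]];
        case 0:
        default:
            break;
    }
}

/* SaveCounters(saved, 2) marks only saved[1] and saved[0], matching "case (N+1) saves counter N". */
```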
*/ + const u64 dfr0 = cpu::GetIdAa64Dfr0El1(); + cpu::EnsureInstructionConsistency(); + + cpu::SetMdscrEl1(0); + cpu::EnsureInstructionConsistency(); + + cpu::SetOslarEl1(0); + cpu::EnsureInstructionConsistency(); + + /* Restore watchpoints. */ + switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumWatchpoints()) { + #define HANDLE_DBG_CASE(N) \ + case N: \ + cpu::SetDbgWcr##N##El1(this->dbgwcrN_el1[ N ]); \ + cpu::SetDbgWvr##N##El1(this->dbgwvrN_el1[ N ]); \ + [[fallthrough]] + + HANDLE_DBG_CASE(15); + HANDLE_DBG_CASE(14); + HANDLE_DBG_CASE(13); + HANDLE_DBG_CASE(12); + HANDLE_DBG_CASE(11); + HANDLE_DBG_CASE(10); + HANDLE_DBG_CASE( 9); + HANDLE_DBG_CASE( 8); + HANDLE_DBG_CASE( 7); + HANDLE_DBG_CASE( 6); + HANDLE_DBG_CASE( 5); + HANDLE_DBG_CASE( 4); + HANDLE_DBG_CASE( 3); + HANDLE_DBG_CASE( 2); + + #undef HANDLE_DBG_CASE + + case 1: + cpu::SetDbgWcr1El1(this->dbgwcrN_el1[1]); + cpu::SetDbgWvr1El1(this->dbgwvrN_el1[1]); + cpu::SetDbgWcr0El1(this->dbgwcrN_el1[0]); + cpu::SetDbgWvr0El1(this->dbgwvrN_el1[0]); + [[fallthrough]]; + default: + break; + } + + /* Restore breakpoints. */ + switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumBreakpoints()) { + #define HANDLE_DBG_CASE(N) \ + case N: \ + cpu::SetDbgBcr##N##El1(this->dbgbcrN_el1[ N ]); \ + cpu::SetDbgBvr##N##El1(this->dbgbvrN_el1[ N ]); \ + [[fallthrough]] + + HANDLE_DBG_CASE(15); + HANDLE_DBG_CASE(14); + HANDLE_DBG_CASE(13); + HANDLE_DBG_CASE(12); + HANDLE_DBG_CASE(11); + HANDLE_DBG_CASE(10); + HANDLE_DBG_CASE( 9); + HANDLE_DBG_CASE( 8); + HANDLE_DBG_CASE( 7); + HANDLE_DBG_CASE( 6); + HANDLE_DBG_CASE( 5); + HANDLE_DBG_CASE( 4); + HANDLE_DBG_CASE( 3); + HANDLE_DBG_CASE( 2); + + #undef HANDLE_DBG_CASE + + case 1: + cpu::SetDbgBcr1El1(this->dbgbcrN_el1[1]); + cpu::SetDbgBvr1El1(this->dbgbvrN_el1[1]); + [[fallthrough]]; + default: + break; + } + + cpu::SetDbgBcr0El1(this->dbgbcrN_el1[0]); + cpu::SetDbgBvr0El1(this->dbgbvrN_el1[0]); + cpu::EnsureInstructionConsistency(); + + cpu::SetContextidrEl1(this->contextidr_el1); + cpu::EnsureInstructionConsistency(); + + cpu::SetMdscrEl1(this->mdscr_el1); + cpu::EnsureInstructionConsistency(); + + /* Restore pmu registers. 
*/ + cpu::SetPmUserEnrEl0(0); + cpu::PerformanceMonitorsControlRegisterAccessor().SetEventCounterReset(true).SetCycleCounterReset(true).Store(); + cpu::SetPmOvsClrEl0(static_cast(static_cast(~u32()))); + cpu::SetPmIntEnClrEl1(static_cast(static_cast(~u32()))); + cpu::SetPmCntEnClrEl0(static_cast(static_cast(~u32()))); + + switch (cpu::PerformanceMonitorsControlRegisterAccessor(this->pmcr_el0).GetN()) { + #define HANDLE_PMU_CASE(N) \ + case (N+1): \ + cpu::SetPmevCntr##N##El0 (this->pmevcntrN_el0 [ N ]); \ + cpu::SetPmevTyper##N##El0(this->pmevtyperN_el0[ N ]); \ + [[fallthrough]] + + HANDLE_PMU_CASE(30); + HANDLE_PMU_CASE(29); + HANDLE_PMU_CASE(28); + HANDLE_PMU_CASE(27); + HANDLE_PMU_CASE(26); + HANDLE_PMU_CASE(25); + HANDLE_PMU_CASE(24); + HANDLE_PMU_CASE(23); + HANDLE_PMU_CASE(22); + HANDLE_PMU_CASE(21); + HANDLE_PMU_CASE(20); + HANDLE_PMU_CASE(19); + HANDLE_PMU_CASE(18); + HANDLE_PMU_CASE(17); + HANDLE_PMU_CASE(16); + HANDLE_PMU_CASE(15); + HANDLE_PMU_CASE(14); + HANDLE_PMU_CASE(13); + HANDLE_PMU_CASE(12); + HANDLE_PMU_CASE(11); + HANDLE_PMU_CASE(10); + HANDLE_PMU_CASE( 9); + HANDLE_PMU_CASE( 8); + HANDLE_PMU_CASE( 7); + HANDLE_PMU_CASE( 6); + HANDLE_PMU_CASE( 5); + HANDLE_PMU_CASE( 4); + HANDLE_PMU_CASE( 3); + HANDLE_PMU_CASE( 2); + HANDLE_PMU_CASE( 1); + HANDLE_PMU_CASE( 0); + + #undef HANDLE_PMU_CASE + case 0: + default: + break; + } + + cpu::SetPmUserEnrEl0 (this->pmuserenr_el0); + cpu::SetPmSelrEl0 (this->pmselr_el0); + cpu::SetPmcCfiltrEl0 (this->pmccfiltr_el0); + cpu::SetPmCntEnSetEl0(this->pmcntenset_el0); + cpu::SetPmIntEnSetEl1(this->pmintenset_el1); + cpu::SetPmOvsSetEl0 (this->pmovsset_el0); + cpu::SetPmcCntrEl0 (this->pmccntr_el0); + cpu::EnsureInstructionConsistency(); + + cpu::SetPmcrEl0(this->pmcr_el0); + cpu::EnsureInstructionConsistency(); + + /* Restore system registers. */ + cpu::SetTtbr0El1 (this->ttbr0_el1); + cpu::SetTcrEl1 (this->tcr_el1); + cpu::SetTpidrEl0 (this->tpidr_el0); + cpu::SetElrEl1 (this->elr_el1); + cpu::SetSpEl0 (this->sp_el0); + cpu::SetSpsrEl1 (this->spsr_el1); + cpu::SetDaif (this->daif); + cpu::SetCpacrEl1 (this->cpacr_el1); + cpu::SetVbarEl1 (this->vbar_el1); + cpu::SetCsselrEl1 (this->csselr_el1); + cpu::SetCntpCtlEl0 (this->cntp_ctl_el0); + cpu::SetCntpCvalEl0(this->cntp_cval_el0); + cpu::SetCntkCtlEl1 (this->cntkctl_el1); + cpu::SetTpidrRoEl0 (this->tpidrro_el0); + cpu::EnsureInstructionConsistency(); + + /* Invalidate the entire tlb. */ + cpu::InvalidateEntireTlb(); + } } @@ -95,8 +449,8 @@ namespace ams::kern::board::nintendo::nx { void KSleepManager::SleepSystem() { /* Ensure device mappings are not modified during sleep. */ - MESOSPHERE_TODO("KDevicePageTable::Lock();"); - ON_SCOPE_EXIT { MESOSPHERE_TODO("KDevicePageTable::Unlock();"); }; + KDevicePageTable::Lock(); + ON_SCOPE_EXIT { KDevicePageTable::Unlock(); }; /* Request that the system sleep. */ { @@ -107,7 +461,7 @@ namespace ams::kern::board::nintendo::nx { KScopedLightLock lk(g_cv_lock); MESOSPHERE_ABORT_UNLESS(g_sleep_target_cores == 0); - g_sleep_target_cores = (1ul << (cpu::NumCores - 1)); + g_sleep_target_cores = (1ul << cpu::NumCores) - 1; g_cv.Broadcast(); while (g_sleep_target_cores != 0) { @@ -140,9 +494,106 @@ namespace ams::kern::board::nintendo::nx { } } - MESOSPHERE_TODO("Implement Sleep/Wake"); - (void)(g_sleep_system_registers[core_id]); - (void)(sleep_buffer_phys_addr); + /* Perform Sleep/Wake sequence. */ + { + /* Disable interrupts. */ + KScopedInterruptDisable di; + + /* Save the system registers for the current core. 
*/ + g_sleep_system_registers[core_id].Save(); + + /* Change the translation tables to use the kernel table. */ + { + /* Get the current value of the translation control register. */ + const u64 tcr = cpu::GetTcrEl1(); + + /* Disable translation table walks on tlb miss. */ + cpu::TranslationControlRegisterAccessor(tcr).SetEpd0(true).Store(); + cpu::EnsureInstructionConsistency(); + + /* Change the translation table base (ttbr0) to use the kernel table. */ + cpu::SetTtbr0El1(Kernel::GetKernelPageTable().GetIdentityMapTtbr0(core_id)); + cpu::EnsureInstructionConsistency(); + + /* Enable translation table walks on tlb miss. */ + cpu::TranslationControlRegisterAccessor(tcr).SetEpd0(false).Store(); + cpu::EnsureInstructionConsistency(); + } + + /* Invalidate the entire tlb. */ + cpu::InvalidateEntireTlb(); + + /* Ensure that all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* If on core 0, put the device page tables to sleep. */ + if (core_id == 0) { + KDevicePageTable::Sleep(); + } + + /* Ensure that all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Save the interrupt manager's state. */ + Kernel::GetInterruptManager().Save(core_id); + + /* Ensure that all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Log that the core is going to sleep. */ + MESOSPHERE_LOG("Core[%d]: Going to sleep, buffer = %010lx\n", core_id, GetInteger(sleep_buffer_phys_addr)); + + /* If we're on a core other than zero, we can just invoke the sleep handler. */ + if (core_id != 0) { + CpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr)); + } else { + /* Wait for all other cores to be powered off. */ + WaitOtherCpuPowerOff(); + + /* Log that we're about to enter SC7. */ + MESOSPHERE_LOG("Entering SC7\n"); + + /* Save the debug log state. */ + KDebugLog::Save(); + + /* Invoke the sleep handler. */ + CpuSleepHandler(GetInteger(sleep_buffer_phys_addr), GetInteger(resume_entry_phys_addr)); + + /* Restore the debug log state. */ + KDebugLog::Restore(); + + /* Log that we're about to exit SC7. */ + MESOSPHERE_LOG("Exiting SC7\n"); + + /* Wake up the other cores. */ + for (s32 i = 1; i < static_cast(cpu::NumCores); ++i) { + PowerOnCpu(i, resume_entry_phys_addr, GetInteger(g_sleep_buffer_phys_addrs[i])); + } + } + + /* Log that the core is waking from sleep. */ + MESOSPHERE_LOG("Core[%d]: Woke from sleep.\n", core_id); + + /* Ensure that all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Restore the interrupt manager's state. */ + Kernel::GetInterruptManager().Restore(core_id); + + /* Ensure that all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* If on core 0, wake up the device page tables. */ + if (core_id == 0) { + KDevicePageTable::Wakeup(); + } + + /* Ensure that all cores get to this point before continuing. */ + cpu::SynchronizeAllCores(); + + /* Restore the system registers for the current core. */ + g_sleep_system_registers[core_id].Restore(); + } /* Signal request completed. 
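Worked example of the core-mask fix above (sketch only): with cpu::NumCores == 4, the old expression produced a mask with only core 3's bit set, so the sleep request could appear complete before every core had reported in; the corrected expression sets one bit per core.

```cpp
#include <cstdint>

constexpr uint64_t NumCores = 4;

constexpr uint64_t OldMask = 1ul << (NumCores - 1); /* 0b1000: only core 3          */
constexpr uint64_t NewMask = (1ul << NumCores) - 1; /* 0b1111: cores 0, 1, 2 and 3  */

static_assert(OldMask == 0x8);
static_assert(NewMask == 0xF);
```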
*/ { diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp index 1cc500c4d..f39ce7b67 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.hpp @@ -23,6 +23,8 @@ namespace ams::kern::board::nintendo::nx { static void CpuSleepHandler(uintptr_t arg, uintptr_t entry); static void ResumeEntry(uintptr_t arg); + static void InvalidateDataCacheForResumeEntry(uintptr_t level); + static void ProcessRequests(uintptr_t buffer); public: static void Initialize(); diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s index 1c4a014cd..210155730 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager_asm.s @@ -14,10 +14,328 @@ * along with this program. If not, see . */ + +/* For some reason GAS doesn't know about it, even with .cpu cortex-a57 */ +#define cpuactlr_el1 s3_1_c15_c2_0 +#define cpuectlr_el1 s3_1_c15_c2_1 + +#define LOAD_IMMEDIATE_32(reg, val) \ + mov reg, #(((val) >> 0x00) & 0xFFFF); \ + movk reg, #(((val) >> 0x10) & 0xFFFF), lsl#16 + +/* ams::kern::board::nintendo::nx::KSleepManager::CpuSleepHandler(uintptr_t arg, uintptr_t entry) */ +.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm, "ax", %progbits +.global _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm +.type _ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm, %function +_ZN3ams4kern5board8nintendo2nx13KSleepManager15CpuSleepHandlerEmm: + /* Save arguments. */ + mov x17, x0 + mov x18, x1 + + /* Enable access to FPU registers. */ + mrs x1, cpacr_el1 + orr x1, x1, #0x100000 + msr cpacr_el1, x1 + dsb sy + isb + + /* Save callee-save registers. */ + stp x19, x20, [x0], #0x10 + stp x21, x22, [x0], #0x10 + stp x23, x24, [x0], #0x10 + stp x25, x26, [x0], #0x10 + stp x27, x28, [x0], #0x10 + stp x29, x30, [x0], #0x10 + + /* Save stack pointer. */ + mov x1, sp + str x1, [x0], #8 + + /* Save fpcr/fpsr. */ + mrs x1, fpcr + mrs x2, fpsr + stp w1, w2, [x0], #8 + + /* Save the floating point registers. */ + stp q0, q1, [x0], #0x20 + stp q2, q3, [x0], #0x20 + stp q4, q5, [x0], #0x20 + stp q6, q7, [x0], #0x20 + stp q8, q9, [x0], #0x20 + stp q10, q11, [x0], #0x20 + stp q12, q13, [x0], #0x20 + stp q14, q15, [x0], #0x20 + stp q16, q17, [x0], #0x20 + stp q28, q19, [x0], #0x20 + stp q20, q21, [x0], #0x20 + stp q22, q23, [x0], #0x20 + stp q24, q25, [x0], #0x20 + stp q26, q27, [x0], #0x20 + stp q28, q29, [x0], #0x20 + stp q30, q31, [x0], #0x20 + + /* Save cpuactlr/cpuectlr. */ + mrs x1, cpuectlr_el1 + mrs x2, cpuactlr_el1 + stp x1, x2, [x0], #0x10 + + /* Save ttbr0/ttbr1. */ + mrs x1, ttbr0_el1 + mrs x2, ttbr1_el1 + stp x1, x2, [x0], #0x10 + + /* Save tcr/mair. */ + mrs x1, tcr_el1 + mrs x2, mair_el1 + stp x1, x2, [x0], #0x10 + + /* Save sctlr/tpidr. */ + mrs x1, sctlr_el1 + mrs x2, tpidr_el1 + stp x1, x2, [x0], #0x10 + + /* Save the virtual resumption entrypoint. */ + adr x1, 77f + stp x1, xzr, [x0], #0x10 + + /* Get the current core id. */ + mrs x0, mpidr_el1 + and x0, x0, #0xFF + + /* If we're on core 0, suspend. */ + cbz x0, 1f + + /* Otherwise, power off. */ + LOAD_IMMEDIATE_32(x0, 0x84000002) + smc #1 +0: b 0b + +1: /* Suspend. 
*/ + LOAD_IMMEDIATE_32(x0, 0xC4000001) + LOAD_IMMEDIATE_32(x1, 0x0201001B) + mov x2, x18 + mov x3, x17 + smc #1 +0: b 0b + /* ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry(uintptr_t arg) */ -.section .text._ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, "ax", %progbits +.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, "ax", %progbits .global _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm .type _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm, %function _ZN3ams4kern5board8nintendo2nx13KSleepManager11ResumeEntryEm: - /* TODO: Implement a real function here. */ - brk 1000 \ No newline at end of file + /* Mask interrupts. */ + msr daifset, #0xF + + /* Save the argument. */ + mov x21, x0 + + /* Check that we're at the correct exception level. */ + mrs x0, currentel + + /* Check if we're EL1. */ + cmp x0, #0x4 + b.eq 3f + + /* Check if we're EL2. */ + cmp x0, #0x8 + b.eq 2f + +1: /* We're running at EL3. */ + b 1b + +2: /* We're running at EL2. */ + b 2b + +3: /* We're running at EL1. */ + + /* Invalidate the L1 cache. */ + mov x0, #0 + bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm + + /* Get the current core id. */ + mrs x0, mpidr_el1 + and x0, x0, #0xFF + + /* If we're on core0, we want to invalidate the L2 cache. */ + cbnz x0, 4f + + mov x0, #1 + bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm + +4: /* Invalidate the L1 cache. */ + mov x0, #0 + bl _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm + + /* Invalidate the instruction cache. */ + ic ialluis + dsb sy + isb + + /* Invalidate the entire tlb. */ + tlbi vmalle1is + dsb sy + isb + + /* Switch to sp 1. */ + msr spsel, #1 + + /* Prepare to restore the saved context. */ + mov x0, x21 + + /* Enable access to FPU registers. */ + mrs x1, cpacr_el1 + orr x1, x1, #0x100000 + msr cpacr_el1, x1 + dsb sy + isb + + /* Restore callee-save registers. */ + ldp x19, x20, [x0], #0x10 + ldp x21, x22, [x0], #0x10 + ldp x23, x24, [x0], #0x10 + ldp x25, x26, [x0], #0x10 + ldp x27, x28, [x0], #0x10 + ldp x29, x30, [x0], #0x10 + + /* Restore stack pointer. */ + ldr x1, [x0], #8 + mov sp, x1 + + /* Restore fpcr/fpsr. */ + ldp w1, w2, [x0], #8 + msr fpcr, x1 + msr fpsr, x2 + + /* Restore the floating point registers. */ + ldp q0, q1, [x0], #0x20 + ldp q2, q3, [x0], #0x20 + ldp q4, q5, [x0], #0x20 + ldp q6, q7, [x0], #0x20 + ldp q8, q9, [x0], #0x20 + ldp q10, q11, [x0], #0x20 + ldp q12, q13, [x0], #0x20 + ldp q14, q15, [x0], #0x20 + ldp q16, q17, [x0], #0x20 + ldp q28, q19, [x0], #0x20 + ldp q20, q21, [x0], #0x20 + ldp q22, q23, [x0], #0x20 + ldp q24, q25, [x0], #0x20 + ldp q26, q27, [x0], #0x20 + ldp q28, q29, [x0], #0x20 + ldp q30, q31, [x0], #0x20 + + /* Restore cpuactlr/cpuectlr. */ + ldp x1, x2, [x0], #0x10 + mrs x3, cpuectlr_el1 + cmp x1, x3 +5: b.ne 5b + mrs x3, cpuactlr_el1 + cmp x2, x3 +6: b.ne 6b + + /* Restore ttbr0/ttbr1. */ + ldp x1, x2, [x0], #0x10 + msr ttbr0_el1, x1 + msr ttbr1_el1, x2 + + /* Restore tcr/mair. */ + ldp x1, x2, [x0], #0x10 + msr tcr_el1, x1 + msr mair_el1, x2 + + /* Get sctlr, tpidr, and the entrypoint. */ + ldp x1, x2, [x0], #0x10 + ldp x3, xzr, [x0], #0x10 + + /* Set the global context back into x18/tpidr. */ + msr tpidr_el1, x2 + mov x18, x2 + dsb sy + isb + + /* Restore sctlr with the wxn bit cleared. */ + bic x2, x1, #0x80000 + msr sctlr_el1, x2 + dsb sy + isb + + /* Jump to the entrypoint. 
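For reference, a sketch under the assumption that the function identifiers follow the PSCI convention (the power-state value is platform-specific and simply copied from the handler above): secondary cores power themselves off via CPU_OFF, while core 0 enters suspend via the SMC64 CPU_SUSPEND call, passing the resume entry point and the sleep buffer as the entry address and context id.

```cpp
#include <cstdint>

constexpr uint32_t PsciFunctionId_CpuOff     = 0x84000002; /* SMC32 CPU_OFF (secondary cores) */
constexpr uint32_t PsciFunctionId_CpuSuspend = 0xC4000001; /* SMC64 CPU_SUSPEND (core 0)      */
constexpr uint32_t PowerState_Suspend        = 0x0201001B; /* value used by the handler above */

/* Register layout of the suspend call, mirroring x0-x3 in CpuSleepHandler. */
struct SuspendArgs {
    uint32_t function_id; /* x0: PsciFunctionId_CpuSuspend             */
    uint32_t power_state; /* x1: PowerState_Suspend                    */
    uint64_t entry_point; /* x2: physical address of ResumeEntry       */
    uint64_t context_id;  /* x3: physical address of the sleep buffer  */
};
```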
*/ + br x3 + +77: /* Virtual resumption entrypoint. */ + + /* Restore sctlr. */ + msr sctlr_el1, x1 + dsb sy + isb + + ret + +/* ams::kern::board::nintendo::nx::KSleepManager::InvalidateDataCacheForResumeEntry(uintptr_t level) */ +.section .sleep._ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, "ax", %progbits +.global _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm +.type _ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm, %function +_ZN3ams4kern5board8nintendo2nx13KSleepManager33InvalidateDataCacheForResumeEntryEm: + /* const u64 level_sel_value = level << 1; */ + lsl x8, x0, #1 + + /* cpu::SetCsselrEl1(level_sel_value); */ + msr csselr_el1, x8 + + /* cpu::InstructionMemoryBarrier(); */ + isb + + /* CacheSizeIdAccessor ccsidr_el1; */ + mrs x13, ccsidr_el1 + + /* const int num_ways = ccsidr_el1.GetAssociativity(); */ + ubfx w10, w13, #3, #0xA + + /* const int line_size = ccsidr_el1.GetLineSize(); */ + and w11, w13, #7 + + /* const int num_sets = ccsidr_el1.GetNumberOfSets(); */ + ubfx w13, w13, #0xD, #0xF + + /* int way = 0; */ + mov w9, wzr + + /* const u64 set_shift = static_cast(line_size + 4); */ + add w11, w11, #4 + + /* const u64 way_shift = static_cast(__builtin_clz(num_ways)); */ + clz w12, w10 + + +0: /* do { */ + /* int set = 0; */ + mov w14, wzr + + /* const u64 way_value = (static_cast(way) << way_shift); */ + lsl w15, w9, w12 + +1: /* do { */ + + /* const u64 isw_value = (static_cast(set) << set_shift) | way_value | level_sel_value; */ + lsl w16, w14, w11 + orr w16, w16, w15 + sxtw x16, w16 + orr x16, x16, x8 + + /* __asm__ __volatile__("dc isw, %0" :: "r"(isw_value) : "memory"); */ + dc isw, x16 + + /* while (set <= num_sets); */ + cmp w13, w14 + add w14, w14, #1 + b.ne 1b + + /* while (way <= num_ways); */ + cmp w9, w10 + add w9, w9, #1 + b.ne 0b + + /* cpu::EnsureInstructionConsistency(); */ + dsb sy + isb + ret diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp index cdeb77909..ca6e0f58c 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp @@ -21,13 +21,21 @@ namespace ams::kern::board::nintendo::nx { namespace { + constexpr size_t SecureAlignment = 128_KB; + /* Global variables for panic. */ - bool g_call_smc_on_panic; + constinit bool g_call_smc_on_panic; /* Global variables for secure memory. */ - constexpr size_t SecureAppletReservedMemorySize = 4_MB; - KVirtualAddress g_secure_applet_memory_address; + constexpr size_t SecureAppletMemorySize = 4_MB; + constinit KSpinLock g_secure_applet_lock; + constinit bool g_secure_applet_memory_used = false; + constinit KVirtualAddress g_secure_applet_memory_address = Null; + constinit KSpinLock g_secure_region_lock; + constinit bool g_secure_region_used = false; + constinit KPhysicalAddress g_secure_region_phys_addr = Null; + constinit size_t g_secure_region_size = 0; /* Global variables for randomness. */ /* Nintendo uses std::mt19937_t for randomness. */ @@ -35,7 +43,7 @@ namespace ams::kern::board::nintendo::nx { /* We will use TinyMT. */ bool g_initialized_random_generator; util::TinyMT g_random_generator; - KSpinLock g_random_lock; + constinit KSpinLock g_random_lock; ALWAYS_INLINE size_t GetRealMemorySizeForInit() { /* TODO: Move this into a header for the MC in general. 
*/ @@ -125,14 +133,181 @@ namespace ams::kern::board::nintendo::nx { return GetConfigU64(which) != 0; } + ALWAYS_INLINE bool CheckRegisterAllowedTable(const u8 *table, const size_t offset) { + return (table[(offset / sizeof(u32)) / BITSIZEOF(u8)] & (1u << ((offset / sizeof(u32)) % BITSIZEOF(u8)))) != 0; + } + + /* TODO: Generate this from a list of register names (see similar logic in exosphere)? */ + constexpr inline const u8 McKernelRegisterWhitelist[(PageSize / sizeof(u32)) / BITSIZEOF(u8)] = { + 0x9F, 0x31, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0xC0, 0x73, 0x3E, 0x6F, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0xE4, 0xFF, 0xFF, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + + /* TODO: Generate this from a list of register names (see similar logic in exosphere)? */ + constexpr inline const u8 McUserRegisterWhitelist[(PageSize / sizeof(u32)) / BITSIZEOF(u8)] = { + 0x00, 0x00, 0x20, 0x00, 0xF0, 0xFF, 0xF7, 0x01, + 0xCD, 0xFE, 0xC0, 0xFE, 0x00, 0x00, 0x00, 0x00, + 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6E, + 0x30, 0x05, 0x06, 0xB0, 0x71, 0xC8, 0x43, 0x04, + 0x80, 0xFF, 0x08, 0x80, 0x03, 0x38, 0x8E, 0x1F, + 0xC8, 0xFF, 0xFF, 0x00, 0x0E, 0x00, 0x00, 0x00, + 0xF0, 0x1F, 0x00, 0x30, 0xF0, 0x03, 0x03, 0x30, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0xFE, 0x0F, + 0x01, 0x00, 0x80, 0x00, 0x00, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }; + bool IsRegisterAccessibleToPrivileged(ams::svc::PhysicalAddress address) { - if (!KMemoryLayout::GetMemoryControllerRegion().Contains(address)) { + /* Find the region for the address. */ + KMemoryRegionTree::const_iterator it = KMemoryLayout::FindContainingRegion(KPhysicalAddress(address)); + if (AMS_LIKELY(it != KMemoryLayout::GetPhysicalMemoryRegionTree().end())) { + if (AMS_LIKELY(it->IsDerivedFrom(KMemoryRegionAttr_NoUserMap | KMemoryRegionType_MemoryController))) { + /* Get the offset within the region. */ + const size_t offset = address - it->GetAddress(); + MESOSPHERE_ABORT_UNLESS(offset < it->GetSize()); + + /* Check the whitelist. */ + if (AMS_LIKELY(CheckRegisterAllowedTable(McKernelRegisterWhitelist, offset))) { + return true; + } + } + } + + return false; + } + + bool IsRegisterAccessibleToUser(ams::svc::PhysicalAddress address) { + /* Find the region for the address. */ + KMemoryRegionTree::const_iterator it = KMemoryLayout::FindContainingRegion(KPhysicalAddress(address)); + if (AMS_LIKELY(it != KMemoryLayout::GetPhysicalMemoryRegionTree().end())) { + /* The PMC is always allowed. 
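Worked example (illustrative; `CheckRegisterAllowed` and `ExampleWhitelist` are assumed names, with the example bytes taken from the start of the kernel whitelist above) of how CheckRegisterAllowedTable indexes the bitmap: one bit per 32-bit register in the 4 KB MMIO page, eight registers per table byte.

```cpp
#include <cstddef>
#include <cstdint>

constexpr bool CheckRegisterAllowed(const uint8_t *table, size_t offset) {
    const size_t word_index = offset / sizeof(uint32_t); /* register index within the page */
    return (table[word_index / 8] & (1u << (word_index % 8))) != 0;
}

/* First four bytes of the kernel whitelist above, covering register offsets 0x00..0x7C. */
constexpr uint8_t ExampleWhitelist[4] = { 0x9F, 0x31, 0x10, 0x00 };

static_assert( CheckRegisterAllowed(ExampleWhitelist, 0x00)); /* word 0  -> byte 0, bit 0 */
static_assert(!CheckRegisterAllowed(ExampleWhitelist, 0x18)); /* word 6  -> byte 0, bit 6 */
static_assert( CheckRegisterAllowed(ExampleWhitelist, 0x50)); /* word 20 -> byte 2, bit 4 */
```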
*/ + if (it->IsDerivedFrom(KMemoryRegionAttr_NoUserMap | KMemoryRegionType_PowerManagementController)) { + return true; + } + + /* Memory controller is allowed if the register is whitelisted. */ + if (it->IsDerivedFrom(KMemoryRegionAttr_NoUserMap | KMemoryRegionType_MemoryController ) || + it->IsDerivedFrom(KMemoryRegionAttr_NoUserMap | KMemoryRegionType_MemoryController0) || + it->IsDerivedFrom(KMemoryRegionAttr_NoUserMap | KMemoryRegionType_MemoryController1)) + { + /* Get the offset within the region. */ + const size_t offset = address - it->GetAddress(); + MESOSPHERE_ABORT_UNLESS(offset < it->GetSize()); + + /* Check the whitelist. */ + if (AMS_LIKELY(CheckRegisterAllowedTable(McUserRegisterWhitelist, offset))) { + return true; + } + } + } + + return false; + } + + bool SetSecureRegion(KPhysicalAddress phys_addr, size_t size) { + /* Ensure address and size are aligned. */ + if (!util::IsAligned(GetInteger(phys_addr), SecureAlignment)) { return false; } - /* TODO: Validate specific offsets. */ + if (!util::IsAligned(size, SecureAlignment)) { + return false; + } + + /* Disable interrupts and acquire the secure region lock. */ + KScopedInterruptDisable di; + KScopedSpinLock lk(g_secure_region_lock); + + /* If size is non-zero, we're allocating the secure region. Otherwise, we're freeing it. */ + if (size != 0) { + /* Verify that the secure region is free. */ + if (g_secure_region_used) { + return false; + } + + /* Set the secure region. */ + g_secure_region_used = true; + g_secure_region_phys_addr = phys_addr; + g_secure_region_size = size; + } else { + /* Verify that the secure region is in use. */ + if (!g_secure_region_used) { + return false; + } + + /* Verify that the address being freed is the secure region. */ + if (phys_addr != g_secure_region_phys_addr) { + return false; + } + + /* Clear the secure region. */ + g_secure_region_used = false; + g_secure_region_phys_addr = Null; + g_secure_region_size = 0; + } + + /* Configure the carveout with the secure monitor. */ + smc::ConfigureCarveout(1, GetInteger(phys_addr), size); + return true; } + Result AllocateSecureMemoryForApplet(KVirtualAddress *out, size_t size) { + /* Verify that the size is valid. */ + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size <= SecureAppletMemorySize, svc::ResultOutOfMemory()); + + /* Disable interrupts and acquire the secure applet lock. */ + KScopedInterruptDisable di; + KScopedSpinLock lk(g_secure_applet_lock); + + /* Check that memory is reserved for secure applet use. */ + MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null); + + /* Verify that the secure applet memory isn't already being used. */ + R_UNLESS(!g_secure_applet_memory_used, svc::ResultOutOfMemory()); + + /* Return the secure applet memory. */ + g_secure_applet_memory_used = true; + *out = g_secure_applet_memory_address; + + return ResultSuccess(); + } + + void FreeSecureMemoryForApplet(KVirtualAddress address, size_t size) { + /* Disable interrupts and acquire the secure applet lock. */ + KScopedInterruptDisable di; + KScopedSpinLock lk(g_secure_applet_lock); + + /* Verify that the memory being freed is correct. */ + MESOSPHERE_ABORT_UNLESS(address == g_secure_applet_memory_address); + MESOSPHERE_ABORT_UNLESS(size <= SecureAppletMemorySize); + MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); + MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_used); + + /* Release the secure applet memory. */ + g_secure_applet_memory_used = false; + } + } /* Initialization. 
*/ @@ -280,10 +455,10 @@ namespace ams::kern::board::nintendo::nx { /* Reserve secure applet memory. */ { MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address == Null); - MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletReservedMemorySize)); + MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, SecureAppletMemorySize)); constexpr auto SecureAppletAllocateOption = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront); - g_secure_applet_memory_address = Kernel::GetMemoryManager().AllocateContinuous(SecureAppletReservedMemorySize / PageSize, 1, SecureAppletAllocateOption); + g_secure_applet_memory_address = Kernel::GetMemoryManager().AllocateContinuous(SecureAppletMemorySize / PageSize, 1, SecureAppletAllocateOption); MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null); } } @@ -299,8 +474,11 @@ namespace ams::kern::board::nintendo::nx { MESOSPHERE_ABORT_UNLESS(smc::ReadWriteRegister(out, address, mask, value)); } - void KSystemControl::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) { - MESOSPHERE_UNIMPLEMENTED(); + Result KSystemControl::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) { + R_UNLESS(AMS_LIKELY(util::IsAligned(address, sizeof(u32))), svc::ResultInvalidAddress()); + R_UNLESS(AMS_LIKELY(IsRegisterAccessibleToUser(address)), svc::ResultInvalidAddress()); + R_UNLESS(AMS_LIKELY(smc::ReadWriteRegister(out, address, mask, value)), svc::ResultInvalidAddress()); + return ResultSuccess(); } /* Randomness. */ @@ -337,6 +515,8 @@ namespace ams::kern::board::nintendo::nx { /* Display a panic screen via secure monitor. */ smc::Panic(0xF00); } + u32 dummy; + smc::init::ReadWriteRegister(std::addressof(dummy), 0x7000E400, 0x10, 0x10); while (true) { /* ... */ } } @@ -345,8 +525,6 @@ namespace ams::kern::board::nintendo::nx { /* Get the function id for the current call. */ u64 function_id = args->r[0]; - MESOSPHERE_LOG("CallSecureMonitor(%lx, %lx, %lx, %lx, %lx, %lx, %lx, %lx);\n", args->r[0], args->r[1], args->r[2], args->r[3], args->r[4], args->r[5], args->r[6], args->r[7]); - /* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */ auto &page_table = GetCurrentProcess().GetPageTable(); auto *bim = page_table.GetBlockInfoManager(); @@ -369,10 +547,8 @@ namespace ams::kern::board::nintendo::nx { KPhysicalAddress phys_addr = page_table.GetHeapPhysicalAddress(it->GetAddress()); args->r[reg_id] = GetInteger(phys_addr) | (GetInteger(virt_addr) & (PageSize - 1)); - MESOSPHERE_LOG("Mapped arg %zu\n", reg_id); } else { /* If we couldn't map, we should clear the address. */ - MESOSPHERE_LOG("Failed to map arg %zu\n", reg_id); args->r[reg_id] = 0; } } @@ -381,12 +557,86 @@ namespace ams::kern::board::nintendo::nx { /* Invoke the secure monitor. */ smc::CallSecureMonitorFromUser(args); - MESOSPHERE_LOG("Secure Monitor Returned: (%lx, %lx, %lx, %lx, %lx, %lx, %lx, %lx);\n", args->r[0], args->r[1], args->r[2], args->r[3], args->r[4], args->r[5], args->r[6], args->r[7]); - /* Make sure that we close any pages that we opened. */ for (size_t i = 0; i < MaxMappedRegisters; i++) { page_groups[i].Close(); } } + /* Secure Memory. 
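
KSystemControl::ReadWriteRegister above is what backs the user-facing register SVC: the address must be 4-byte aligned and pass IsRegisterAccessibleToUser, after which the secure monitor performs the masked read-modify-write. A hedged caller-side sketch, assuming the usual libnx wrapper svcReadWriteRegister and that a zero mask leaves the register unmodified:

    #include <switch.h>

    /* Read PMC_CNTRL (0x7000E400, the PMC register also referenced by StopSystem above). */
    Result ReadPmcCntrl(u32 *out_value) {
        return svcReadWriteRegister(out_value, 0x7000E400ul, 0 /* mask */, 0 /* value */);
    }

    /* Set bit 4 of PMC_CNTRL -- the same masked write StopSystem issues above (a PMC main reset). */
    Result AssertPmcMainReset(u32 *out_old) {
        return svcReadWriteRegister(out_old, 0x7000E400ul, 0x10 /* mask */, 0x10 /* value */);
    }
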
*/ + size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) { + if (pool == KMemoryManager::Pool_Applet) { + return 0; + } + return size; + } + + Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) { + /* Applet secure memory is handled separately. */ + if (pool == KMemoryManager::Pool_Applet) { + return AllocateSecureMemoryForApplet(out, size); + } + + /* Ensure the size is aligned. */ + const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment); + R_UNLESS(util::IsAligned(size, alignment), svc::ResultInvalidSize()); + + /* Allocate the memory. */ + const size_t num_pages = size / PageSize; + const KVirtualAddress vaddr = Kernel::GetMemoryManager().AllocateContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast(pool), KMemoryManager::Direction_FromFront)); + R_UNLESS(vaddr != Null, svc::ResultOutOfMemory()); + + /* Open a reference to the memory. */ + Kernel::GetMemoryManager().Open(vaddr, num_pages); + + /* Ensure we don't leak references to the memory on error. */ + auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(vaddr, num_pages); }; + + /* If the memory isn't already secure, set it as secure. */ + if (pool != KMemoryManager::Pool_System) { + /* Get the physical address. */ + const KPhysicalAddress paddr = KPageTable::GetHeapPhysicalAddress(vaddr); + MESOSPHERE_ABORT_UNLESS(paddr != Null); + + /* Set the secure region. */ + R_UNLESS(SetSecureRegion(paddr, size), svc::ResultOutOfMemory()); + } + + /* We succeeded. */ + mem_guard.Cancel(); + *out = vaddr; + return ResultSuccess(); + } + + void KSystemControl::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) { + /* Applet secure memory is handled separately. */ + if (pool == KMemoryManager::Pool_Applet) { + return FreeSecureMemoryForApplet(address, size); + } + + /* Ensure the size is aligned. */ + const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment); + MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), alignment)); + MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, alignment)); + + /* If the memory isn't secure system, reset the secure region. */ + if (pool != KMemoryManager::Pool_System) { + /* Check that the size being freed is the current secure region size. */ + MESOSPHERE_ABORT_UNLESS(g_secure_region_size == size); + + /* Get the physical address. */ + const KPhysicalAddress paddr = KPageTable::GetHeapPhysicalAddress(address); + MESOSPHERE_ABORT_UNLESS(paddr != Null); + + /* Check that the memory being freed is the current secure region. */ + MESOSPHERE_ABORT_UNLESS(paddr == g_secure_region_phys_addr); + + /* Free the secure region. */ + MESOSPHERE_ABORT_UNLESS(SetSecureRegion(paddr, 0)); + } + + /* Close the secure region's pages. 
*/ + Kernel::GetMemoryManager().Close(address, size / PageSize); + } + } \ No newline at end of file diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp index 3ae375069..9405d8a67 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp @@ -201,6 +201,12 @@ namespace ams::kern::board::nintendo::nx::smc { MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); } + void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { + SecureMonitorArguments args = { FunctionId_CpuOn, core_id, static_cast(entrypoint), static_cast(arg) }; + CallPrivilegedSecureMonitorFunction(args); + MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); + } + void GenerateRandomBytes(void *dst, size_t size) { /* Setup for call. */ SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size }; diff --git a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp index 72abe5dee..fe3e7634f 100644 --- a/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp +++ b/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.hpp @@ -89,6 +89,8 @@ namespace ams::kern::board::nintendo::nx::smc { bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value); void ConfigureCarveout(size_t which, uintptr_t address, size_t size); + void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); + void NORETURN Panic(u32 color); void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args); diff --git a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp index 65451a9b6..5737afea7 100644 --- a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp +++ b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp @@ -127,10 +127,13 @@ namespace ams::kern::init { size += util::AlignUp(sizeof(NAME) * (COUNT), alignof(void *)); \ }); - /* NOTE: This can't be used right now because we don't have all these types implemented. */ - /* Once we do, uncomment the following and stop using the hardcoded size. */ - /* TODO: FOREACH_SLAB_TYPE(ADD_SLAB_SIZE) */ - size = 0x647000; + /* Add the size required for each slab. */ + FOREACH_SLAB_TYPE(ADD_SLAB_SIZE) + + #undef ADD_SLAB_SIZE + + /* Add the reserved size. */ + size += SlabRegionReservedSize; return size; } diff --git a/libraries/libmesosphere/source/kern_debug_log.cpp b/libraries/libmesosphere/source/kern_debug_log.cpp index 45a53f534..6288f1df2 100644 --- a/libraries/libmesosphere/source/kern_debug_log.cpp +++ b/libraries/libmesosphere/source/kern_debug_log.cpp @@ -404,18 +404,52 @@ namespace ams::kern { char g_print_buffer[0x400]; void PutString(const char *str) { - if (g_initialized_impl) { - while (*str) { - const char c = *(str++); - if (c == '\n') { - KDebugLogImpl::PutChar('\r'); - } - KDebugLogImpl::PutChar(c); - } - KDebugLogImpl::Flush(); + /* Only print if the implementation is initialized. */ + if (!g_initialized_impl) { + return; } + + while (*str) { + /* Get a character. */ + const char c = *(str++); + + /* Print the character. 
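
The rewritten PutString above (and the new PutUserString that follows) is the sink for kernel debug output; user-supplied strings only reach it when the kernel is built with MESOSPHERE_ENABLE_DEBUG_PRINT and debug logging is enabled on the target. A minimal caller-side sketch, assuming the standard libnx wrapper name:

    #include <string.h>
    #include <switch.h>

    /* Forwards a message to KDebugLog::PrintUserString on a debug-print-enabled kernel;
       a '\n' in the message is expanded to "\r\n" on the UART side. */
    void LogToKernel(const char *msg) {
        svcOutputDebugString(msg, strlen(msg));
    }
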
*/ + if (c == '\n') { + KDebugLogImpl::PutChar('\r'); + } + KDebugLogImpl::PutChar(c); + } + + KDebugLogImpl::Flush(); } + #if defined(MESOSPHERE_ENABLE_DEBUG_PRINT) + + Result PutUserString(ams::kern::svc::KUserPointer user_str, size_t len) { + /* Only print if the implementation is initialized. */ + if (!g_initialized_impl) { + return ResultSuccess(); + } + + for (size_t i = 0; i < len; ++i) { + /* Get a character. */ + char c; + R_TRY(user_str.CopyArrayElementTo(std::addressof(c), i)); + + /* Print the character. */ + if (c == '\n') { + KDebugLogImpl::PutChar('\r'); + } + KDebugLogImpl::PutChar(c); + } + + KDebugLogImpl::Flush(); + + return ResultSuccess(); + } + + #endif + } @@ -455,4 +489,36 @@ namespace ams::kern { VSNPrintfImpl(dst, dst_size, format, vl); } + Result KDebugLog::PrintUserString(ams::kern::svc::KUserPointer user_str, size_t len) { + /* If printing is enabled, print the user string. */ + #if defined(MESOSPHERE_ENABLE_DEBUG_PRINT) + if (KTargetSystem::IsDebugLoggingEnabled()) { + KScopedInterruptDisable di; + KScopedSpinLock lk(g_debug_log_lock); + + R_TRY(PutUserString(user_str, len)); + } + #endif + + return ResultSuccess(); + } + + void KDebugLog::Save() { + if (KTargetSystem::IsDebugLoggingEnabled()) { + KScopedInterruptDisable di; + KScopedSpinLock lk(g_debug_log_lock); + + KDebugLogImpl::Save(); + } + } + + void KDebugLog::Restore() { + if (KTargetSystem::IsDebugLoggingEnabled()) { + KScopedInterruptDisable di; + KScopedSpinLock lk(g_debug_log_lock); + + KDebugLogImpl::Restore(); + } + } + } diff --git a/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp b/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp index e90cbc70f..dc90e1c9e 100644 --- a/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp +++ b/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp @@ -28,7 +28,7 @@ namespace ams::kern { UartRegister_LSR = 5, - UartRegister_IRSA_CSR = 8, + UartRegister_IRDA_CSR = 8, UartRegister_DLL = 0, UartRegister_DLH = 1, @@ -36,6 +36,8 @@ namespace ams::kern { KVirtualAddress g_uart_address = 0; + constinit u32 g_saved_registers[5]; + NOINLINE u32 ReadUartRegister(UartRegister which) { return GetPointer(g_uart_address)[which]; } @@ -75,7 +77,7 @@ namespace ams::kern { /* Configure the FIFO to be enabled and clear receive. */ WriteUartRegister(UartRegister_FCR, 0x03); - WriteUartRegister(UartRegister_IRSA_CSR, 0x02); + WriteUartRegister(UartRegister_IRDA_CSR, 0x02); ReadUartRegister(UartRegister_FCR); return true; @@ -84,7 +86,7 @@ namespace ams::kern { void KDebugLogImpl::PutChar(char c) { while (ReadUartRegister(UartRegister_LSR) & 0x100) { /* While the FIFO is full, yield. */ - __asm__ __volatile__("yield" ::: "memory"); + cpu::Yield(); } WriteUartRegister(UartRegister_THR, c); cpu::DataSynchronizationBarrier(); @@ -96,4 +98,44 @@ namespace ams::kern { } } + void KDebugLogImpl::Save() { + /* Save LCR, IER, FCR. */ + g_saved_registers[0] = ReadUartRegister(UartRegister_LCR); + g_saved_registers[1] = ReadUartRegister(UartRegister_IER); + g_saved_registers[2] = ReadUartRegister(UartRegister_FCR); + + /* Set Divisor Latch Access bit, to allow access to DLL/DLH */ + WriteUartRegister(UartRegister_LCR, 0x80); + ReadUartRegister(UartRegister_LCR); + + /* Save DLL/DLH. */ + g_saved_registers[3] = ReadUartRegister(UartRegister_DLL); + g_saved_registers[4] = ReadUartRegister(UartRegister_DLH); + + /* Restore Divisor Latch Access bit. 
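
Save and Restore above preserve the UART configuration across sleep; the DLL/DLH divisor registers they touch share indices 0 and 1 with THR/RBR and IER, and are only visible while the Divisor Latch Access Bit (bit 7 of LCR) is set. A sketch of that standard 16550-style access pattern (register indices follow the usual layout, with LCR assumed at index 3):

    #include <cstdint>

    constexpr uint32_t UART_LCR_DLAB = 0x80;

    /* Program the baud divisor on a 16550-style UART with 32-bit register stride. */
    void WriteBaudDivisor(volatile uint32_t *uart, uint32_t divisor) {
        const uint32_t lcr = uart[3];        /* LCR */
        uart[3] = lcr | UART_LCR_DLAB;       /* expose DLL/DLH at indices 0/1 */
        uart[0] = divisor & 0xFF;            /* DLL */
        uart[1] = (divisor >> 8) & 0xFF;     /* DLH */
        uart[3] = lcr;                       /* hide them again, restoring THR/IER access */
    }
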
*/ + WriteUartRegister(UartRegister_LCR, g_saved_registers[0]); + ReadUartRegister(UartRegister_LCR); + } + + void KDebugLogImpl::Restore() { + /* Set Divisor Latch Access bit, to allow access to DLL/DLH */ + WriteUartRegister(UartRegister_LCR, 0x80); + ReadUartRegister(UartRegister_LCR); + + /* Restore DLL/DLH. */ + WriteUartRegister(UartRegister_DLL, g_saved_registers[3]); + WriteUartRegister(UartRegister_DLH, g_saved_registers[4]); + ReadUartRegister(UartRegister_DLH); + + /* Restore Divisor Latch Access bit. */ + WriteUartRegister(UartRegister_LCR, g_saved_registers[0]); + ReadUartRegister(UartRegister_LCR); + + /* Restore IER and FCR. */ + WriteUartRegister(UartRegister_IER, g_saved_registers[1]); + WriteUartRegister(UartRegister_FCR, g_saved_registers[2] | 2); + WriteUartRegister(UartRegister_IRDA_CSR, 0x02); + ReadUartRegister(UartRegister_FCR); + } + } diff --git a/libraries/libmesosphere/source/kern_debug_log_impl.hpp b/libraries/libmesosphere/source/kern_debug_log_impl.hpp index 1a278a03c..b2b7aa049 100644 --- a/libraries/libmesosphere/source/kern_debug_log_impl.hpp +++ b/libraries/libmesosphere/source/kern_debug_log_impl.hpp @@ -23,6 +23,10 @@ namespace ams::kern { static NOINLINE bool Initialize(); static NOINLINE void PutChar(char c); static NOINLINE void Flush(); + + /* Functionality for preserving across sleep. */ + static NOINLINE void Save(); + static NOINLINE void Restore(); }; } diff --git a/libraries/libmesosphere/source/kern_k_address_arbiter.cpp b/libraries/libmesosphere/source/kern_k_address_arbiter.cpp new file mode 100644 index 000000000..dfcd40883 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_address_arbiter.cpp @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + ALWAYS_INLINE bool ReadFromUser(s32 *out, KProcessAddress address) { + return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address)); + } + + ALWAYS_INLINE bool DecrementIfLessThan(s32 *out, KProcessAddress address, s32 value) { + KScopedInterruptDisable di; + + if (!cpu::CanAccessAtomic(address)) { + return false; + } + + return UserspaceAccess::DecrementIfLessThanAtomic(out, GetPointer(address), value); + } + + ALWAYS_INLINE bool UpdateIfEqual(s32 *out, KProcessAddress address, s32 value, s32 new_value) { + KScopedInterruptDisable di; + + if (!cpu::CanAccessAtomic(address)) { + return false; + } + + return UserspaceAccess::UpdateIfEqualAtomic(out, GetPointer(address), value, new_value); + } + + } + + Result KAddressArbiter::Signal(uintptr_t addr, s32 count) { + /* Perform signaling. 
*/ + s32 num_waiters = 0; + { + KScopedSchedulerLock sl; + g_cv_arbiter_compare_thread.SetupForAddressArbiterCompare(addr, -1); + + auto it = this->tree.nfind(g_cv_arbiter_compare_thread); + while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + KThread *target_thread = std::addressof(*it); + target_thread->SetSyncedObject(nullptr, ResultSuccess()); + + AMS_ASSERT(target_thread->IsWaitingForAddressArbiter()); + target_thread->Wakeup(); + + it = this->tree.erase(it); + target_thread->ClearAddressArbiter(); + ++num_waiters; + } + } + return ResultSuccess(); + } + + Result KAddressArbiter::SignalAndIncrementIfEqual(uintptr_t addr, s32 value, s32 count) { + /* Perform signaling. */ + s32 num_waiters = 0; + { + KScopedSchedulerLock sl; + g_cv_arbiter_compare_thread.SetupForAddressArbiterCompare(addr, -1); + + auto it = this->tree.nfind(g_cv_arbiter_compare_thread); + + /* Check the userspace value. */ + s32 user_value; + R_UNLESS(UpdateIfEqual(std::addressof(user_value), addr, value, value + 1), svc::ResultInvalidCurrentMemory()); + R_UNLESS(user_value == value, svc::ResultInvalidState()); + + while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + KThread *target_thread = std::addressof(*it); + target_thread->SetSyncedObject(nullptr, ResultSuccess()); + + AMS_ASSERT(target_thread->IsWaitingForAddressArbiter()); + target_thread->Wakeup(); + + it = this->tree.erase(it); + target_thread->ClearAddressArbiter(); + ++num_waiters; + } + } + return ResultSuccess(); + } + + Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count) { + /* Perform signaling. */ + s32 num_waiters = 0; + { + KScopedSchedulerLock sl; + g_cv_arbiter_compare_thread.SetupForAddressArbiterCompare(addr, -1); + + auto it = this->tree.nfind(g_cv_arbiter_compare_thread); + + /* Determine the updated value. */ + s32 new_value; + if (count <= 0) { + if ((it != this->tree.end()) && (it->GetAddressArbiterKey() == addr)) { + new_value = value - 1; + } else { + new_value = value + 1; + } + } else { + auto tmp_it = it; + int tmp_num_waiters = 0; + while ((tmp_it != this->tree.end()) && (tmp_it->GetAddressArbiterKey() == addr) && (tmp_num_waiters < count + 1)) { + ++tmp_num_waiters; + ++tmp_it; + } + + if (tmp_num_waiters == 0) { + new_value = value + 1; + } else if (tmp_num_waiters <= count) { + new_value = value - 1; + } else { + new_value = value; + } + } + + /* Check the userspace value. */ + s32 user_value; + bool succeeded; + if (value != new_value) { + succeeded = UpdateIfEqual(std::addressof(user_value), addr, value, new_value); + } else { + succeeded = ReadFromUser(std::addressof(user_value), addr); + } + + R_UNLESS(succeeded, svc::ResultInvalidCurrentMemory()); + R_UNLESS(user_value == value, svc::ResultInvalidState()); + + while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) { + KThread *target_thread = std::addressof(*it); + target_thread->SetSyncedObject(nullptr, ResultSuccess()); + + AMS_ASSERT(target_thread->IsWaitingForAddressArbiter()); + target_thread->Wakeup(); + + it = this->tree.erase(it); + target_thread->ClearAddressArbiter(); + ++num_waiters; + } + } + return ResultSuccess(); + } + + Result KAddressArbiter::WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout) { + /* Prepare to wait. 
*/ + KThread *cur_thread = GetCurrentThreadPointer(); + KHardwareTimer *timer; + + { + KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout); + + /* Check that the thread isn't terminating. */ + if (cur_thread->IsTerminationRequested()) { + slp.CancelSleep(); + return svc::ResultTerminationRequested(); + } + + /* Set the synced object. */ + cur_thread->SetSyncedObject(nullptr, ams::svc::ResultTimedOut()); + + /* Read the value from userspace. */ + s32 user_value; + bool succeeded; + if (decrement) { + succeeded = DecrementIfLessThan(std::addressof(user_value), addr, value); + } else { + succeeded = ReadFromUser(std::addressof(user_value), addr); + } + + if (!succeeded) { + slp.CancelSleep(); + return svc::ResultInvalidCurrentMemory(); + } + + /* Check that the value is less than the specified one. */ + if (user_value >= value) { + slp.CancelSleep(); + return svc::ResultInvalidState(); + } + + /* Check that the timeout is non-zero. */ + if (timeout == 0) { + slp.CancelSleep(); + return svc::ResultTimedOut(); + } + + /* Set the arbiter. */ + cur_thread->SetAddressArbiter(std::addressof(this->tree), addr); + this->tree.insert(*cur_thread); + cur_thread->SetState(KThread::ThreadState_Waiting); + } + + /* Cancel the timer wait. */ + if (timer != nullptr) { + timer->CancelTask(cur_thread); + } + + /* Remove from the address arbiter. */ + { + KScopedSchedulerLock sl; + + if (cur_thread->IsWaitingForAddressArbiter()) { + this->tree.erase(this->tree.iterator_to(*cur_thread)); + cur_thread->ClearAddressArbiter(); + } + } + + /* Get the result. */ + KSynchronizationObject *dummy; + return cur_thread->GetWaitResult(std::addressof(dummy)); + } + + Result KAddressArbiter::WaitIfEqual(uintptr_t addr, s32 value, s64 timeout) { + /* Prepare to wait. */ + KThread *cur_thread = GetCurrentThreadPointer(); + KHardwareTimer *timer; + + { + KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout); + + /* Check that the thread isn't terminating. */ + if (cur_thread->IsTerminationRequested()) { + slp.CancelSleep(); + return svc::ResultTerminationRequested(); + } + + /* Set the synced object. */ + cur_thread->SetSyncedObject(nullptr, ams::svc::ResultTimedOut()); + + /* Read the value from userspace. */ + s32 user_value; + if (!ReadFromUser(std::addressof(user_value), addr)) { + slp.CancelSleep(); + return svc::ResultInvalidCurrentMemory(); + } + + /* Check that the value is equal. */ + if (value != user_value) { + slp.CancelSleep(); + return svc::ResultInvalidState(); + } + + /* Check that the timeout is non-zero. */ + if (timeout == 0) { + slp.CancelSleep(); + return svc::ResultTimedOut(); + } + + /* Set the arbiter. */ + cur_thread->SetAddressArbiter(std::addressof(this->tree), addr); + this->tree.insert(*cur_thread); + cur_thread->SetState(KThread::ThreadState_Waiting); + } + + /* Cancel the timer wait. */ + if (timer != nullptr) { + timer->CancelTask(cur_thread); + } + + /* Remove from the address arbiter. */ + { + KScopedSchedulerLock sl; + + if (cur_thread->IsWaitingForAddressArbiter()) { + this->tree.erase(this->tree.iterator_to(*cur_thread)); + cur_thread->ClearAddressArbiter(); + } + } + + /* Get the result. 
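
WaitIfLessThan and WaitIfEqual above are the kernel half of the address arbiter; user code pairs them with the Signal* variants earlier in this file to build futex-like primitives. A rough usage sketch, assuming the customary libnx wrappers and enum names (these names are assumptions, not taken from this diff):

    #include <switch.h>

    static s32 g_counter = 0;   /* shared word the arbiter operates on */

    /* Block until signaled, as long as the counter still holds expected_value. */
    Result WaitOnCounter(s32 expected_value) {
        return svcWaitForAddress(&g_counter, ArbitrationType_WaitIfEqual, expected_value, -1 /* no timeout */);
    }

    /* Wake up to max_waiters blocked threads; a count <= 0 wakes every waiter. */
    Result WakeCounterWaiters(s32 max_waiters) {
        return svcSignalToAddress(&g_counter, SignalType_Signal, 0 /* value unused for plain Signal */, max_waiters);
    }
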
*/ + KSynchronizationObject *dummy; + return cur_thread->GetWaitResult(std::addressof(dummy)); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_address_space_info.cpp b/libraries/libmesosphere/source/kern_k_address_space_info.cpp index 1d81e14d7..c8cce5841 100644 --- a/libraries/libmesosphere/source/kern_k_address_space_info.cpp +++ b/libraries/libmesosphere/source/kern_k_address_space_info.cpp @@ -22,16 +22,16 @@ namespace ams::kern { constexpr uintptr_t Invalid = std::numeric_limits::max(); constexpr KAddressSpaceInfo AddressSpaceInfos[] = { - { .bit_width = 32, .address = 2_MB, .size = 1_GB - 2_MB, .type = KAddressSpaceInfo::Type_32Bit, }, - { .bit_width = 32, .address = 1_GB, .size = 4_GB - 1_GB, .type = KAddressSpaceInfo::Type_Small64Bit, }, + { .bit_width = 32, .address = 2_MB, .size = 1_GB - 2_MB, .type = KAddressSpaceInfo::Type_MapSmall, }, + { .bit_width = 32, .address = 1_GB, .size = 4_GB - 1_GB, .type = KAddressSpaceInfo::Type_MapLarge, }, { .bit_width = 32, .address = Invalid, .size = 1_GB, .type = KAddressSpaceInfo::Type_Heap, }, { .bit_width = 32, .address = Invalid, .size = 1_GB, .type = KAddressSpaceInfo::Type_Alias, }, - { .bit_width = 36, .address = 128_MB, .size = 2_GB - 128_MB, .type = KAddressSpaceInfo::Type_32Bit, }, - { .bit_width = 36, .address = 2_GB, .size = 64_GB - 2_GB, .type = KAddressSpaceInfo::Type_Small64Bit, }, + { .bit_width = 36, .address = 128_MB, .size = 2_GB - 128_MB, .type = KAddressSpaceInfo::Type_MapSmall, }, + { .bit_width = 36, .address = 2_GB, .size = 64_GB - 2_GB, .type = KAddressSpaceInfo::Type_MapLarge, }, { .bit_width = 36, .address = Invalid, .size = 6_GB, .type = KAddressSpaceInfo::Type_Heap, }, { .bit_width = 36, .address = Invalid, .size = 6_GB, .type = KAddressSpaceInfo::Type_Alias, }, - { .bit_width = 39, .address = 128_MB, .size = 512_GB - 128_MB, .type = KAddressSpaceInfo::Type_Large64Bit, }, - { .bit_width = 39, .address = Invalid, .size = 64_GB, .type = KAddressSpaceInfo::Type_32Bit, }, + { .bit_width = 39, .address = 128_MB, .size = 512_GB - 128_MB, .type = KAddressSpaceInfo::Type_Map39Bit, }, + { .bit_width = 39, .address = Invalid, .size = 64_GB, .type = KAddressSpaceInfo::Type_MapSmall, }, { .bit_width = 39, .address = Invalid, .size = 6_GB, .type = KAddressSpaceInfo::Type_Heap, }, { .bit_width = 39, .address = Invalid, .size = 64_GB, .type = KAddressSpaceInfo::Type_Alias, }, { .bit_width = 39, .address = Invalid, .size = 2_GB, .type = KAddressSpaceInfo::Type_Stack, }, @@ -54,15 +54,15 @@ namespace ams::kern { }; constexpr bool IsAllowed32BitType(KAddressSpaceInfo::Type type) { - return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Large64Bit && type != KAddressSpaceInfo::Type_Stack; + return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Map39Bit && type != KAddressSpaceInfo::Type_Stack; } constexpr bool IsAllowed36BitType(KAddressSpaceInfo::Type type) { - return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Large64Bit && type != KAddressSpaceInfo::Type_Stack; + return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Map39Bit && type != KAddressSpaceInfo::Type_Stack; } constexpr bool IsAllowed39BitType(KAddressSpaceInfo::Type type) { - return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_Small64Bit; + return type < KAddressSpaceInfo::Type_Count && type != KAddressSpaceInfo::Type_MapLarge; } } diff --git a/libraries/libmesosphere/source/kern_k_capabilities.cpp 
b/libraries/libmesosphere/source/kern_k_capabilities.cpp index 871721108..e8e5d2a86 100644 --- a/libraries/libmesosphere/source/kern_k_capabilities.cpp +++ b/libraries/libmesosphere/source/kern_k_capabilities.cpp @@ -34,6 +34,14 @@ namespace ams::kern { return this->SetCapabilities(caps, num_caps, page_table); } + Result KCapabilities::Initialize(svc::KUserPointer user_caps, s32 num_caps, KProcessPageTable *page_table) { + /* We're initializing a user process. */ + /* Most fields have already been cleared by our constructor. */ + + /* Parse the user capabilities array. */ + return this->SetCapabilities(user_caps, num_caps, page_table); + } + Result KCapabilities::SetCorePriorityCapability(const util::BitPack32 cap) { /* We can't set core/priority if we've already set them. */ R_UNLESS(this->core_mask == 0, svc::ResultInvalidArgument()); @@ -164,7 +172,7 @@ namespace ams::kern { for (size_t i = 0; i < util::size(ids); i++) { if (ids[i] != PaddingInterruptId) { R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), svc::ResultOutOfRange()); - R_UNLESS(this->SetInterruptAllowed(ids[i]), svc::ResultOutOfRange()); + R_UNLESS(this->SetInterruptPermitted(ids[i]), svc::ResultOutOfRange()); } } @@ -258,4 +266,35 @@ namespace ams::kern { return ResultSuccess(); } + Result KCapabilities::SetCapabilities(svc::KUserPointer user_caps, s32 num_caps, KProcessPageTable *page_table) { + u32 set_flags = 0, set_svc = 0; + + for (s32 i = 0; i < num_caps; i++) { + /* Read the cap from userspace. */ + u32 cap0; + R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap0), i)); + + const util::BitPack32 cap = { cap0 }; + if (GetCapabilityType(cap) == CapabilityType::MapRange) { + /* Check that the pair cap exists. */ + R_UNLESS((++i) < num_caps, svc::ResultInvalidCombination()); + + /* Read the second cap from userspace. */ + u32 cap1; + R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap1), i)); + + /* Check the pair cap is a map range cap. */ + const util::BitPack32 size_cap = { cap1 }; + R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange, svc::ResultInvalidCombination()); + + /* Map the range. 
*/ + R_TRY(this->MapRange(cap, size_cap, page_table)); + } else { + R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table)); + } + } + + return ResultSuccess(); + } + } diff --git a/libraries/libmesosphere/source/kern_k_class_token.cpp b/libraries/libmesosphere/source/kern_k_class_token.cpp index 2531d9f45..531f6412c 100644 --- a/libraries/libmesosphere/source/kern_k_class_token.cpp +++ b/libraries/libmesosphere/source/kern_k_class_token.cpp @@ -26,10 +26,10 @@ namespace ams::kern { static_assert(ClassToken == 0b00000111'00000011); static_assert(ClassToken == 0b00001011'00000001); static_assert(ClassToken == 0b00010011'00000001); - /* TODO: static_assert(ClassToken == 0b00100011'00000001); */ - /* TODO: static_assert(ClassToken == 0b01000011'00000001); */ - /* TODO: static_assert(ClassToken == 0b10000011'00000001); */ - /* TODO: static_assert(ClassToken == 0b00001101'00000000); */ + static_assert(ClassToken == 0b00100011'00000001); + static_assert(ClassToken == 0b01000011'00000001); + static_assert(ClassToken == 0b10000011'00000001); + static_assert(ClassToken == 0b00001101'00000000); static_assert(ClassToken == 0b00010101'00000001); static_assert(ClassToken == 0b00100101'00000000); static_assert(ClassToken == 0b01000101'00000000); @@ -37,9 +37,9 @@ namespace ams::kern { static_assert(ClassToken == 0b00011001'00000000); static_assert(ClassToken == 0b00101001'00000000); static_assert(ClassToken == 0b01001001'00000000); - /* TODO: static_assert(ClassToken == 0b10001001'00000000); */ - /* TODO: static_assert(ClassToken == 0b00110001'00000000); */ - /* TODO: static_assert(ClassToken == 0b01010001'00000000); */ + static_assert(ClassToken == 0b10001001'00000000); + static_assert(ClassToken == 0b00110001'00000000); + static_assert(ClassToken == 0b01010001'00000000); static_assert(ClassToken == 0b10010001'00000000); static_assert(ClassToken == 0b01100001'00000000); static_assert(ClassToken == 0b10100001'00000000); @@ -56,10 +56,10 @@ namespace ams::kern { static_assert(ClassToken == ((0b00000111 << 8) | ClassToken)); static_assert(ClassToken == ((0b00001011 << 8) | ClassToken)); static_assert(ClassToken == ((0b00010011 << 8) | ClassToken)); - /* TODO: static_assert(ClassToken == ((0b00100011 << 8) | ClassToken)); */ - /* TODO: static_assert(ClassToken == ((0b01000011 << 8) | ClassToken)); */ - /* TODO: static_assert(ClassToken == ((0b10000011 << 8) | ClassToken)); */ - /* TODO: static_assert(ClassToken == ((0b00001101 << 8) | ClassToken)); */ + static_assert(ClassToken == ((0b00100011 << 8) | ClassToken)); + static_assert(ClassToken == ((0b01000011 << 8) | ClassToken)); + static_assert(ClassToken == ((0b10000011 << 8) | ClassToken)); + static_assert(ClassToken == ((0b00001101 << 8) | ClassToken)); static_assert(ClassToken == ((0b00010101 << 8) | ClassToken)); static_assert(ClassToken == ((0b00100101 << 8) | ClassToken)); static_assert(ClassToken == ((0b01000101 << 8) | ClassToken)); @@ -67,9 +67,9 @@ namespace ams::kern { static_assert(ClassToken == ((0b00011001 << 8) | ClassToken)); static_assert(ClassToken == ((0b00101001 << 8) | ClassToken)); static_assert(ClassToken == ((0b01001001 << 8) | ClassToken)); - /* TODO: static_assert(ClassToken == ((0b10001001 << 8) | ClassToken)); */ - /* TODO: static_assert(ClassToken == ((0b00110001 << 8) | ClassToken)); */ - /* TODO: static_assert(ClassToken == ((0b01010001 << 8) | ClassToken)); */ + static_assert(ClassToken == ((0b10001001 << 8) | ClassToken)); + static_assert(ClassToken == ((0b00110001 << 8) | ClassToken)); + static_assert(ClassToken 
== ((0b01010001 << 8) | ClassToken)); static_assert(ClassToken == ((0b10010001 << 8) | ClassToken)); static_assert(ClassToken == ((0b01100001 << 8) | ClassToken)); static_assert(ClassToken == ((0b10100001 << 8) | ClassToken)); @@ -85,10 +85,10 @@ namespace ams::kern { static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); - /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ - /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ - /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ - /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); @@ -96,9 +96,9 @@ namespace ams::kern { static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); - /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ - /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ - /* TODO: static_assert(std::is_final::value && std::is_base_of::value); */ + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); + static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); static_assert(std::is_final::value && std::is_base_of::value); diff --git a/libraries/libmesosphere/source/kern_k_client_port.cpp b/libraries/libmesosphere/source/kern_k_client_port.cpp index 10eb84764..97c023ce6 100644 --- a/libraries/libmesosphere/source/kern_k_client_port.cpp +++ b/libraries/libmesosphere/source/kern_k_client_port.cpp @@ -25,6 +25,19 @@ namespace ams::kern { this->max_sessions = max_sessions; } + void KClientPort::OnSessionFinalized() { + KScopedSchedulerLock sl; + + const auto prev = this->num_sessions--; + if (prev == this->max_sessions) { + this->NotifyAvailable(); + } + } + + void KClientPort::OnServerClosed() { + MESOSPHERE_ASSERT_THIS(); + } + bool KClientPort::IsLight() const { return this->GetParent()->IsLight(); } @@ -38,9 +51,140 @@ namespace ams::kern { } bool KClientPort::IsSignaled() const { - /* TODO: Check preconditions later. */ MESOSPHERE_ASSERT_THIS(); return this->num_sessions < this->max_sessions; } + Result KClientPort::CreateSession(KClientSession **out) { + MESOSPHERE_ASSERT_THIS(); + + /* Reserve a new session from the resource limit. */ + KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax); + R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Update the session counts. */ + { + /* Atomically increment the number of sessions. 
*/ + s32 new_sessions; + { + const auto max = this->max_sessions; + auto cur_sessions = this->num_sessions.load(std::memory_order_acquire); + do { + R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions()); + new_sessions = cur_sessions + 1; + } while (!this->num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); + + } + + /* Atomically update the peak session tracking. */ + { + auto peak = this->peak_sessions.load(std::memory_order_acquire); + do { + if (peak >= new_sessions) { + break; + } + } while (!this->peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); + } + } + + /* Create a new session. */ + KSession *session = KSession::Create(); + if (session == nullptr) { + /* Decrement the session count. */ + const auto prev = this->num_sessions--; + if (prev == this->max_sessions) { + this->NotifyAvailable(); + } + + return svc::ResultOutOfResource(); + } + + /* Initialize the session. */ + session->Initialize(this, this->parent->GetName()); + + /* Commit the session reservation. */ + session_reservation.Commit(); + + /* Register the session. */ + KSession::Register(session); + auto session_guard = SCOPE_GUARD { + session->GetClientSession().Close(); + session->GetServerSession().Close(); + }; + + /* Enqueue the session with our parent. */ + R_TRY(this->parent->EnqueueSession(std::addressof(session->GetServerSession()))); + + /* We succeeded, so set the output. */ + session_guard.Cancel(); + *out = std::addressof(session->GetClientSession()); + return ResultSuccess(); + } + + Result KClientPort::CreateLightSession(KLightClientSession **out) { + MESOSPHERE_ASSERT_THIS(); + + /* Reserve a new session from the resource limit. */ + KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax); + R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Update the session counts. */ + { + /* Atomically increment the number of sessions. */ + s32 new_sessions; + { + const auto max = this->max_sessions; + auto cur_sessions = this->num_sessions.load(std::memory_order_acquire); + do { + R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions()); + new_sessions = cur_sessions + 1; + } while (!this->num_sessions.compare_exchange_weak(cur_sessions, new_sessions, std::memory_order_relaxed)); + + } + + /* Atomically update the peak session tracking. */ + { + auto peak = this->peak_sessions.load(std::memory_order_acquire); + do { + if (peak >= new_sessions) { + break; + } + } while (!this->peak_sessions.compare_exchange_weak(peak, new_sessions, std::memory_order_relaxed)); + } + } + + /* Create a new session. */ + KLightSession *session = KLightSession::Create(); + if (session == nullptr) { + /* Decrement the session count. */ + const auto prev = this->num_sessions--; + if (prev == this->max_sessions) { + this->NotifyAvailable(); + } + + return svc::ResultOutOfResource(); + } + + /* Initialize the session. */ + session->Initialize(this, this->parent->GetName()); + + /* Commit the session reservation. */ + session_reservation.Commit(); + + /* Register the session. */ + KLightSession::Register(session); + auto session_guard = SCOPE_GUARD { + session->GetClientSession().Close(); + session->GetServerSession().Close(); + }; + + /* Enqueue the session with our parent. */ + R_TRY(this->parent->EnqueueSession(std::addressof(session->GetServerSession()))); + + /* We succeeded, so set the output. 
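
The session accounting in CreateSession and CreateLightSession above is a lock-free "increment while below the limit, then advance a peak watermark" pattern. Restated in isolation with plain std::atomic (a sketch, not the kernel's types):

    #include <atomic>

    /* Returns true and increments count if it was below max; also records the highest value reached. */
    bool TryOpenSession(std::atomic<int> &count, std::atomic<int> &peak, int max) {
        int cur = count.load(std::memory_order_acquire);
        int next;
        do {
            if (cur >= max) {
                return false;   /* mirrors svc::ResultOutOfSessions() */
            }
            next = cur + 1;
        } while (!count.compare_exchange_weak(cur, next, std::memory_order_relaxed));

        /* Raise the peak if this increment exceeded it; compare_exchange_weak reloads observed_peak on failure. */
        int observed_peak = peak.load(std::memory_order_acquire);
        while (observed_peak < next && !peak.compare_exchange_weak(observed_peak, next, std::memory_order_relaxed)) {
            /* retry */
        }
        return true;
    }
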
*/ + session_guard.Cancel(); + *out = std::addressof(session->GetClientSession()); + return ResultSuccess(); + } + } diff --git a/libraries/libmesosphere/source/kern_k_client_session.cpp b/libraries/libmesosphere/source/kern_k_client_session.cpp new file mode 100644 index 000000000..7f164a5c0 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_client_session.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void KClientSession::Destroy() { + MESOSPHERE_ASSERT_THIS(); + + this->parent->OnClientClosed(); + this->parent->Close(); + } + + void KClientSession::OnServerClosed() { + MESOSPHERE_ASSERT_THIS(); + } + + Result KClientSession::SendSyncRequest(uintptr_t address, size_t size) { + MESOSPHERE_ASSERT_THIS(); + + /* Create a session request. */ + KSessionRequest *request = KSessionRequest::Create(); + R_UNLESS(request != nullptr, svc::ResultOutOfResource()); + ON_SCOPE_EXIT { request->Close(); }; + + /* Initialize the request. */ + request->Initialize(nullptr, address, size); + + /* Send the request. */ + { + KScopedSchedulerLock sl; + + GetCurrentThread().SetSyncedObject(nullptr, ResultSuccess()); + + R_TRY(this->parent->OnRequest(request)); + } + + /* Get the result. */ + KSynchronizationObject *dummy; + return GetCurrentThread().GetWaitResult(std::addressof(dummy)); + } + + Result KClientSession::SendAsyncRequest(KWritableEvent *event, uintptr_t address, size_t size) { + MESOSPHERE_ASSERT_THIS(); + + /* Create a session request. */ + KSessionRequest *request = KSessionRequest::Create(); + R_UNLESS(request != nullptr, svc::ResultOutOfResource()); + ON_SCOPE_EXIT { request->Close(); }; + + /* Initialize the request. */ + request->Initialize(event, address, size); + + /* Send the request. */ + { + KScopedSchedulerLock sl; + + R_TRY(this->parent->OnRequest(request)); + } + + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_code_memory.cpp b/libraries/libmesosphere/source/kern_k_code_memory.cpp new file mode 100644 index 000000000..57931326d --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_code_memory.cpp @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + Result KCodeMemory::Initialize(KProcessAddress addr, size_t size) { + MESOSPHERE_ASSERT_THIS(); + + /* Set members. 
*/ + this->owner = GetCurrentProcessPointer(); + + /* Initialize the page group. */ + auto &page_table = this->owner->GetPageTable(); + new (GetPointer(this->page_group)) KPageGroup(page_table.GetBlockInfoManager()); + + /* Ensure that our page group's state is valid on exit. */ + auto pg_guard = SCOPE_GUARD { GetReference(this->page_group).~KPageGroup(); }; + + /* Lock the memory. */ + R_TRY(page_table.LockForCodeMemory(GetPointer(this->page_group), addr, size)); + + /* Clear the memory. */ + for (const auto &block : GetReference(this->page_group)) { + /* Clear and store cache. */ + std::memset(GetVoidPointer(block.GetAddress()), 0xFF, block.GetSize()); + cpu::StoreDataCache(GetVoidPointer(block.GetAddress()), block.GetSize()); + } + + /* Set remaining tracking members. */ + this->owner->Open(); + this->address = addr; + this->is_initialized = true; + this->is_owner_mapped = false; + this->is_mapped = false; + + /* We succeeded. */ + pg_guard.Cancel(); + return ResultSuccess(); + } + + void KCodeMemory::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + /* Unlock. */ + if (!this->is_mapped && !this->is_owner_mapped) { + const size_t size = GetReference(this->page_group).GetNumPages() * PageSize; + MESOSPHERE_R_ABORT_UNLESS(this->owner->GetPageTable().UnlockForCodeMemory(this->address, size, GetReference(this->page_group))); + } + + /* Close the page group. */ + GetReference(this->page_group).Close(); + GetReference(this->page_group).Finalize(); + + /* Close our reference to our owner. */ + this->owner->Close(); + + /* Perform inherited finalization. */ + KAutoObjectWithSlabHeapAndContainer::Finalize(); + } + + Result KCodeMemory::Map(KProcessAddress address, size_t size) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Ensure we're not already mapped. */ + R_UNLESS(!this->is_mapped, svc::ResultInvalidState()); + + /* Map the memory. */ + R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut, KMemoryPermission_UserReadWrite)); + + /* Mark ourselves as mapped. */ + this->is_mapped = true; + + return ResultSuccess(); + } + + Result KCodeMemory::Unmap(KProcessAddress address, size_t size) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Unmap the memory. */ + R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_CodeOut)); + + /* Mark ourselves as unmapped. */ + MESOSPHERE_ASSERT(this->is_mapped); + this->is_mapped = false; + + return ResultSuccess(); + } + + Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Ensure we're not already mapped. */ + R_UNLESS(!this->is_owner_mapped, svc::ResultInvalidState()); + + /* Convert the memory permission. 
*/ + KMemoryPermission k_perm; + switch (perm) { + case ams::svc::MemoryPermission_Read: k_perm = KMemoryPermission_UserRead; break; + case ams::svc::MemoryPermission_ReadExecute: k_perm = KMemoryPermission_UserReadExecute; break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + /* Map the memory. */ + R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode, k_perm)); + + /* Mark ourselves as mapped. */ + this->is_owner_mapped = true; + + return ResultSuccess(); + } + + Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Unmap the memory. */ + R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), KMemoryState_GeneratedCode)); + + /* Mark ourselves as unmapped. */ + MESOSPHERE_ASSERT(this->is_owner_mapped); + this->is_owner_mapped = false; + + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_condition_variable.cpp b/libraries/libmesosphere/source/kern_k_condition_variable.cpp new file mode 100644 index 000000000..270c3c8ac --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_condition_variable.cpp @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + constinit KThread g_cv_arbiter_compare_thread; + + namespace { + + ALWAYS_INLINE bool ReadFromUser(u32 *out, KProcessAddress address) { + return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address)); + } + + ALWAYS_INLINE bool WriteToUser(KProcessAddress address, const u32 *p) { + return UserspaceAccess::CopyMemoryToUserSize32Bit(GetVoidPointer(address), p); + } + + ALWAYS_INLINE bool UpdateLockAtomic(u32 *out, KProcessAddress address, u32 if_zero, u32 new_orr_mask) { + return UserspaceAccess::UpdateLockAtomic(out, GetPointer(address), if_zero, new_orr_mask); + } + + } + + Result KConditionVariable::SignalToAddress(KProcessAddress addr) { + KThread *owner_thread = std::addressof(GetCurrentThread()); + + /* Signal the address. */ + { + KScopedSchedulerLock sl; + + /* Remove waiter thread. */ + s32 num_waiters; + KThread *next_owner_thread = owner_thread->RemoveWaiterByKey(std::addressof(num_waiters), addr); + + /* Determine the next tag. */ + u32 next_value = 0; + if (next_owner_thread) { + next_value = next_owner_thread->GetAddressKeyValue(); + if (num_waiters > 1) { + next_value |= ams::svc::HandleWaitMask; + } + + next_owner_thread->SetSyncedObject(nullptr, ResultSuccess()); + next_owner_thread->Wakeup(); + } + + /* Write the value to userspace. 
*/ + if (!WriteToUser(addr, std::addressof(next_value))) { + if (next_owner_thread) { + next_owner_thread->SetSyncedObject(nullptr, svc::ResultInvalidCurrentMemory()); + } + + return svc::ResultInvalidCurrentMemory(); + } + } + + return ResultSuccess(); + } + + Result KConditionVariable::WaitForAddress(ams::svc::Handle handle, KProcessAddress addr, u32 value) { + KThread *cur_thread = std::addressof(GetCurrentThread()); + + /* Wait for the address. */ + { + KScopedAutoObject owner_thread; + MESOSPHERE_ASSERT(owner_thread.IsNull()); + { + KScopedSchedulerLock sl; + cur_thread->SetSyncedObject(nullptr, ResultSuccess()); + + /* Check if the thread should terminate. */ + R_UNLESS(!cur_thread->IsTerminationRequested(), svc::ResultTerminationRequested()); + + { + /* Read the tag from userspace. */ + u32 test_tag; + R_UNLESS(ReadFromUser(std::addressof(test_tag), addr), svc::ResultInvalidCurrentMemory()); + + /* If the tag isn't the handle (with wait mask), we're done. */ + R_SUCCEED_IF(test_tag != (handle | ams::svc::HandleWaitMask)); + + /* Get the lock owner thread. */ + owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle(handle); + R_UNLESS(owner_thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Update the lock. */ + cur_thread->SetAddressKey(addr, value); + owner_thread->AddWaiter(cur_thread); + cur_thread->SetState(KThread::ThreadState_Waiting); + } + } + MESOSPHERE_ASSERT(owner_thread.IsNotNull()); + } + + /* Remove the thread as a waiter from the lock owner. */ + { + KScopedSchedulerLock sl; + KThread *owner_thread = cur_thread->GetLockOwner(); + if (owner_thread != nullptr) { + owner_thread->RemoveWaiter(cur_thread); + } + } + + /* Get the wait result. */ + KSynchronizationObject *dummy; + return cur_thread->GetWaitResult(std::addressof(dummy)); + } + + KThread *KConditionVariable::SignalImpl(KThread *thread) { + /* Check pre-conditions. */ + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Update the tag. */ + KProcessAddress address = thread->GetAddressKey(); + u32 own_tag = thread->GetAddressKeyValue(); + + u32 prev_tag; + bool can_access; + { + KScopedInterruptDisable di; + + can_access = cpu::CanAccessAtomic(address); + if (AMS_LIKELY(can_access)) { + UpdateLockAtomic(std::addressof(prev_tag), address, own_tag, ams::svc::HandleWaitMask); + } + } + + KThread *thread_to_close = nullptr; + if (AMS_LIKELY(can_access)) { + if (prev_tag == ams::svc::InvalidHandle) { + /* If nobody held the lock previously, we're all good. */ + thread->SetSyncedObject(nullptr, ResultSuccess()); + thread->Wakeup(); + } else { + /* Get the previous owner. */ + KThread *owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle(static_cast(prev_tag & ~ams::svc::HandleWaitMask)) + .ReleasePointerUnsafe(); + if (AMS_LIKELY(owner_thread != nullptr)) { + /* Add the thread as a waiter on the owner. */ + owner_thread->AddWaiter(thread); + thread_to_close = owner_thread; + } else { + /* The lock was tagged with a thread that doesn't exist. */ + thread->SetSyncedObject(nullptr, svc::ResultInvalidState()); + thread->Wakeup(); + } + } + } else { + /* If the address wasn't accessible, note so. */ + thread->SetSyncedObject(nullptr, svc::ResultInvalidCurrentMemory()); + thread->Wakeup(); + } + + return thread_to_close; + } + + void KConditionVariable::Signal(uintptr_t cv_key, s32 count) { + /* Prepare for signaling. 
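
The lock word ("tag") that SignalToAddress and WaitForAddress manage holds the owner thread's handle, with ams::svc::HandleWaitMask OR'd in while further threads are queued; on unlock the kernel writes either zero or the next owner's tag. A small decoding sketch (the numeric mask value is an assumption; only the layout is taken from the code above):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t HandleWaitMask = 0x40000000;   /* assumed value of ams::svc::HandleWaitMask */

    struct DecodedTag {
        uint32_t owner_handle;
        bool has_waiters;
    };

    constexpr DecodedTag DecodeLockTag(uint32_t tag) {
        return { tag & ~HandleWaitMask, (tag & HandleWaitMask) != 0 };
    }

    int main() {
        /* What SignalToAddress writes when it hands the lock to a next owner while another waiter remains. */
        const DecodedTag t = DecodeLockTag(0xAB /* hypothetical handle */ | HandleWaitMask);
        std::printf("owner=0x%X waiters=%d\n", t.owner_handle, t.has_waiters);
        return 0;
    }
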
*/ + constexpr int MaxThreads = 16; + KLinkedList thread_list; + KThread *thread_array[MaxThreads]; + int num_to_close = 0; + + /* Perform signaling. */ + int num_waiters = 0; + { + KScopedSchedulerLock sl; + g_cv_arbiter_compare_thread.SetupForConditionVariableCompare(cv_key, -1); + + auto it = this->tree.nfind(g_cv_arbiter_compare_thread); + while ((it != this->tree.end()) && (count <= 0 || num_waiters < count) && (it->GetConditionVariableKey() == cv_key)) { + KThread *target_thread = std::addressof(*it); + + if (KThread *thread = this->SignalImpl(target_thread); thread != nullptr) { + if (num_to_close < MaxThreads) { + thread_array[num_to_close++] = thread; + } else { + thread_list.push_back(*thread); + } + } + + it = this->tree.erase(it); + target_thread->ClearConditionVariable(); + ++num_waiters; + } + } + + /* Close threads in the array. */ + for (auto i = 0; i < num_to_close; ++i) { + thread_array[i]->Close(); + } + + /* Close threads in the list. */ + if (num_waiters > MaxThreads) { + auto it = thread_list.begin(); + while (it != thread_list.end()) { + KThread *thread = std::addressof(*it); + thread->Close(); + it = thread_list.erase(it); + } + } + } + + Result KConditionVariable::Wait(KProcessAddress addr, uintptr_t key, u32 value, s64 timeout) { + /* Prepare to wait. */ + KThread *cur_thread = GetCurrentThreadPointer(); + KHardwareTimer *timer; + + { + KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout); + + /* Set the synced object. */ + cur_thread->SetSyncedObject(nullptr, ams::svc::ResultTimedOut()); + + /* Check that the thread isn't terminating. */ + if (cur_thread->IsTerminationRequested()) { + slp.CancelSleep(); + return svc::ResultTerminationRequested(); + } + + /* Update the value and process for the next owner. */ + { + /* Remove waiter thread. */ + s32 num_waiters; + KThread *next_owner_thread = cur_thread->RemoveWaiterByKey(std::addressof(num_waiters), GetInteger(addr)); + + /* Update for the next owner thread. */ + u32 next_value = 0; + if (next_owner_thread != nullptr) { + /* Get the next tag value. */ + next_value = next_owner_thread->GetAddressKeyValue(); + if (num_waiters > 1) { + next_value |= ams::svc::HandleWaitMask; + } + + /* Wake up the next owner. */ + next_owner_thread->SetSyncedObject(nullptr, ResultSuccess()); + next_owner_thread->Wakeup(); + } + + /* Write the value to userspace. */ + if (!WriteToUser(addr, std::addressof(next_value))) { + slp.CancelSleep(); + return svc::ResultInvalidCurrentMemory(); + } + } + + /* Update condition variable tracking. */ + { + cur_thread->SetConditionVariable(std::addressof(this->tree), addr, key, value); + this->tree.insert(*cur_thread); + } + + /* If the timeout is non-zero, set the thread as waiting. */ + if (timeout != 0) { + cur_thread->SetState(KThread::ThreadState_Waiting); + } + } + + /* Cancel the timer wait. */ + if (timer != nullptr) { + timer->CancelTask(cur_thread); + } + + /* Remove from the condition variable. */ + { + KScopedSchedulerLock sl; + + if (KThread *owner = cur_thread->GetLockOwner(); owner != nullptr) { + owner->RemoveWaiter(cur_thread); + } + + if (cur_thread->IsWaitingForConditionVariable()) { + this->tree.erase(this->tree.iterator_to(*cur_thread)); + cur_thread->ClearConditionVariable(); + } + } + + /* Get the result. 
*/ + KSynchronizationObject *dummy; + return cur_thread->GetWaitResult(std::addressof(dummy)); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_debug_base.cpp b/libraries/libmesosphere/source/kern_k_debug_base.cpp new file mode 100644 index 000000000..93c3d044c --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_debug_base.cpp @@ -0,0 +1,1120 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + ALWAYS_INLINE KDebugBase *GetDebugObject(KProcess *process) { + return static_cast(process->GetDebugObject()); + } + + } + + void KDebugBase::Initialize() { + /* Clear the process and continue flags. */ + this->process = nullptr; + this->continue_flags = 0; + } + + bool KDebugBase::Is64Bit() const { + MESOSPHERE_ASSERT(this->lock.IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(this->process != nullptr); + return this->process->Is64Bit(); + } + + + Result KDebugBase::QueryMemoryInfo(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, KProcessAddress address) { + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Check that we have a valid process. */ + R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated()); + R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated()); + + /* Query the mapping's info. */ + KMemoryInfo info; + R_TRY(process->GetPageTable().QueryInfo(std::addressof(info), out_page_info, address)); + + /* Write output. */ + *out_memory_info = info.GetSvcMemoryInfo(); + return ResultSuccess(); + } + + Result KDebugBase::ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size) { + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Check that we have a valid process. */ + R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated()); + R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated()); + + /* Get the page tables. */ + KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable(); + KProcessPageTable &target_pt = this->process->GetPageTable(); + + /* Verify that the regions are in range. */ + R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(debugger_pt.Contains(buffer, size), svc::ResultInvalidCurrentMemory()); + + /* Iterate over the target process's memory blocks. */ + KProcessAddress cur_address = address; + size_t remaining = size; + while (remaining > 0) { + /* Get the current memory info. */ + KMemoryInfo info; + ams::svc::PageInfo pi; + R_TRY(target_pt.QueryInfo(std::addressof(info), std::addressof(pi), cur_address)); + + /* Check that the memory is accessible. */ + R_UNLESS(info.GetState() != static_cast(ams::svc::MemoryState_Inaccessible), svc::ResultInvalidAddress()); + + /* Get the current size. */ + const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address)); + + /* Read the memory. 
*/ + if (info.GetState() != KMemoryState_Io) { + /* The memory is normal memory. */ + R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size)); + } else { + /* The memory is IO memory. */ + + /* Verify that the memory is readable. */ + R_UNLESS((info.GetPermission() & KMemoryPermission_UserRead) == KMemoryPermission_UserRead, svc::ResultInvalidAddress()); + + /* Get the physical address of the memory. */ + /* NOTE: Nintendo does not verify the result of this call. */ + KPhysicalAddress phys_addr; + target_pt.GetPhysicalAddress(std::addressof(phys_addr), cur_address); + + /* Map the address as IO in the current process. */ + R_TRY(debugger_pt.MapIo(util::AlignDown(GetInteger(phys_addr), PageSize), PageSize, KMemoryPermission_UserRead)); + + /* Get the address of the newly mapped IO. */ + KProcessAddress io_address; + Result query_result = debugger_pt.QueryIoMapping(std::addressof(io_address), util::AlignDown(GetInteger(phys_addr), PageSize), PageSize); + MESOSPHERE_R_ASSERT(query_result); + R_TRY(query_result); + + /* Ensure we clean up the new mapping on scope exit. */ + ON_SCOPE_EXIT { MESOSPHERE_R_ABORT_UNLESS(debugger_pt.UnmapPages(util::AlignDown(GetInteger(io_address), PageSize), 1, KMemoryState_Io)); }; + + /* Adjust the io address for alignment. */ + io_address += (GetInteger(cur_address) & (PageSize - 1)); + + /* Get the readable size. */ + const size_t readable_size = std::min(cur_size, util::AlignDown(GetInteger(cur_address) + PageSize, PageSize) - GetInteger(cur_address)); + + /* Read the memory. */ + switch ((GetInteger(cur_address) | readable_size) & 3) { + case 0: + { + R_UNLESS(UserspaceAccess::ReadIoMemory32Bit(GetVoidPointer(buffer), GetVoidPointer(io_address), readable_size), svc::ResultInvalidPointer()); + } + break; + case 2: + { + R_UNLESS(UserspaceAccess::ReadIoMemory16Bit(GetVoidPointer(buffer), GetVoidPointer(io_address), readable_size), svc::ResultInvalidPointer()); + } + break; + default: + { + R_UNLESS(UserspaceAccess::ReadIoMemory8Bit(GetVoidPointer(buffer), GetVoidPointer(io_address), readable_size), svc::ResultInvalidPointer()); + } + break; + } + } + + /* Advance. */ + buffer += cur_size; + cur_address += cur_size; + remaining -= cur_size; + } + + return ResultSuccess(); + } + + Result KDebugBase::WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size) { + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Check that we have a valid process. */ + R_UNLESS(this->process != nullptr, svc::ResultProcessTerminated()); + R_UNLESS(!this->process->IsTerminated(), svc::ResultProcessTerminated()); + + /* Get the page tables. */ + KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable(); + KProcessPageTable &target_pt = this->process->GetPageTable(); + + /* Verify that the regions are in range. */ + R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(debugger_pt.Contains(buffer, size), svc::ResultInvalidCurrentMemory()); + + /* Iterate over the target process's memory blocks. */ + KProcessAddress cur_address = address; + size_t remaining = size; + while (remaining > 0) { + /* Get the current memory info. */ + KMemoryInfo info; + ams::svc::PageInfo pi; + R_TRY(target_pt.QueryInfo(std::addressof(info), std::addressof(pi), cur_address)); + + /* Check that the memory is accessible. */ + R_UNLESS(info.GetState() != static_cast(ams::svc::MemoryState_Inaccessible), svc::ResultInvalidAddress()); + + /* Get the current size. 
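It is clamped to the end of the current memory block, so each iteration of the loop handles at most one block.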
*/ + const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address)); + + /* Read the memory. */ + if (info.GetState() != KMemoryState_Io) { + /* The memory is normal memory. */ + R_TRY(target_pt.WriteDebugMemory(cur_address, GetVoidPointer(buffer), cur_size)); + } else { + /* The memory is IO memory. */ + + /* Verify that the memory is writable. */ + R_UNLESS((info.GetPermission() & KMemoryPermission_UserReadWrite) == KMemoryPermission_UserReadWrite, svc::ResultInvalidAddress()); + + /* Get the physical address of the memory. */ + /* NOTE: Nintendo does not verify the result of this call. */ + KPhysicalAddress phys_addr; + target_pt.GetPhysicalAddress(std::addressof(phys_addr), cur_address); + + /* Map the address as IO in the current process. */ + R_TRY(debugger_pt.MapIo(util::AlignDown(GetInteger(phys_addr), PageSize), PageSize, KMemoryPermission_UserReadWrite)); + + /* Get the address of the newly mapped IO. */ + KProcessAddress io_address; + Result query_result = debugger_pt.QueryIoMapping(std::addressof(io_address), util::AlignDown(GetInteger(phys_addr), PageSize), PageSize); + MESOSPHERE_R_ASSERT(query_result); + R_TRY(query_result); + + /* Ensure we clean up the new mapping on scope exit. */ + ON_SCOPE_EXIT { MESOSPHERE_R_ABORT_UNLESS(debugger_pt.UnmapPages(util::AlignDown(GetInteger(io_address), PageSize), 1, KMemoryState_Io)); }; + + /* Adjust the io address for alignment. */ + io_address += (GetInteger(cur_address) & (PageSize - 1)); + + /* Get the readable size. */ + const size_t readable_size = std::min(cur_size, util::AlignDown(GetInteger(cur_address) + PageSize, PageSize) - GetInteger(cur_address)); + + /* Read the memory. */ + switch ((GetInteger(cur_address) | readable_size) & 3) { + case 0: + { + R_UNLESS(UserspaceAccess::WriteIoMemory32Bit(GetVoidPointer(io_address), GetVoidPointer(buffer), readable_size), svc::ResultInvalidPointer()); + } + break; + case 2: + { + R_UNLESS(UserspaceAccess::WriteIoMemory16Bit(GetVoidPointer(io_address), GetVoidPointer(buffer), readable_size), svc::ResultInvalidPointer()); + } + break; + default: + { + R_UNLESS(UserspaceAccess::WriteIoMemory8Bit(GetVoidPointer(io_address), GetVoidPointer(buffer), readable_size), svc::ResultInvalidPointer()); + } + break; + } + } + + /* Advance. */ + buffer += cur_size; + cur_address += cur_size; + remaining -= cur_size; + } + + return ResultSuccess(); + } + + Result KDebugBase::GetRunningThreadInfo(ams::svc::LastThreadContext *out_context, u64 *out_thread_id) { + /* Get the attached process. */ + KScopedAutoObject process = this->GetProcess(); + R_UNLESS(process.IsNotNull(), svc::ResultProcessTerminated()); + + /* Get the thread info. */ + { + KScopedSchedulerLock sl; + + /* Get the running thread. */ + const s32 core_id = GetCurrentCoreId(); + KThread *thread = process->GetRunningThread(core_id); + + /* Check that the thread's idle count is correct. */ + R_UNLESS(process->GetRunningThreadIdleCount(core_id) == Kernel::GetScheduler(core_id).GetIdleCount(), svc::ResultNoThread()); + + /* Check that the thread is running on the current core. */ + R_UNLESS(thread != nullptr, svc::ResultUnknownThread()); + R_UNLESS(thread->GetActiveCore() == core_id, svc::ResultUnknownThread()); + + /* Get the thread's exception context. */ + GetExceptionContext(thread)->GetSvcThreadContext(out_context); + + /* Get the thread's id. */ + *out_thread_id = thread->GetId(); + } + + return ResultSuccess(); + } + + Result KDebugBase::Attach(KProcess *target) { + /* Check that the process isn't null. 
*/ + MESOSPHERE_ASSERT(target != nullptr); + + /* Attach to the process. */ + { + /* Lock both ourselves, the target process, and the scheduler. */ + KScopedLightLock state_lk(target->GetStateLock()); + KScopedLightLock list_lk(target->GetListLock()); + KScopedLightLock this_lk(this->lock); + KScopedSchedulerLock sl; + + /* Check that the process isn't already being debugged. */ + R_UNLESS(!target->IsAttachedToDebugger(), svc::ResultBusy()); + + { + /* Ensure the process is in a state that allows for debugging. */ + const KProcess::State state = target->GetState(); + switch (state) { + case KProcess::State_Created: + case KProcess::State_Running: + case KProcess::State_Crashed: + break; + case KProcess::State_CreatedAttached: + case KProcess::State_RunningAttached: + case KProcess::State_DebugBreak: + return svc::ResultBusy(); + case KProcess::State_Terminating: + case KProcess::State_Terminated: + return svc::ResultProcessTerminated(); + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + + /* Set our process member, and open a reference to the target. */ + this->process = target; + this->process->Open(); + + /* Set ourselves as the process's attached object. */ + this->old_process_state = this->process->SetDebugObject(this); + + /* Send an event for our attaching to the process. */ + this->PushDebugEvent(ams::svc::DebugEvent_CreateProcess); + + /* Send events for attaching to each thread in the process. */ + { + auto end = this->process->GetThreadList().end(); + for (auto it = this->process->GetThreadList().begin(); it != end; ++it) { + /* Request that we suspend the thread. */ + it->RequestSuspend(KThread::SuspendType_Debug); + + /* If the thread is in a state for us to do so, generate the event. */ + if (const auto thread_state = it->GetState(); thread_state == KThread::ThreadState_Runnable || thread_state == KThread::ThreadState_Waiting) { + /* Mark the thread as attached to. */ + it->SetDebugAttached(); + + /* Send the event. */ + this->PushDebugEvent(ams::svc::DebugEvent_CreateThread, it->GetId(), GetInteger(it->GetThreadLocalRegionAddress()), it->GetEntrypoint()); + } + } + } + + /* Send the process's jit debug info, if relevant. */ + if (KEventInfo *jit_info = this->process->GetJitDebugInfo(); jit_info != nullptr) { + this->EnqueueDebugEventInfo(jit_info); + } + + /* Send an exception event to represent our attaching. */ + this->PushDebugEvent(ams::svc::DebugEvent_Exception, ams::svc::DebugException_DebuggerAttached); + + /* Signal. */ + this->NotifyAvailable(); + } + } + + return ResultSuccess(); + } + + Result KDebugBase::BreakProcess() { + /* Get the attached process. */ + KScopedAutoObject target = this->GetProcess(); + R_UNLESS(target.IsNotNull(), svc::ResultProcessTerminated()); + + /* Lock both ourselves, the target process, and the scheduler. */ + KScopedLightLock state_lk(target->GetStateLock()); + KScopedLightLock list_lk(target->GetListLock()); + KScopedLightLock this_lk(this->lock); + KScopedSchedulerLock sl; + + /* Check that we're still attached to the process, and that it's not terminated. */ + /* NOTE: Here Nintendo only checks that this->process is not nullptr. */ + R_UNLESS(this->process == target.GetPointerUnsafe(), svc::ResultProcessTerminated()); + R_UNLESS(!target->IsTerminated(), svc::ResultProcessTerminated()); + + /* Get the currently active threads. 
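For each core we record the id of the thread currently running there, using sentinel values when no thread can be identified; these ids are reported in the DebuggerBreak exception event pushed below.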
*/ + constexpr u64 ThreadIdNoThread = -1ll; + constexpr u64 ThreadIdUnknownThread = -2ll; + u64 thread_ids[cpu::NumCores]; + for (size_t i = 0; i < util::size(thread_ids); ++i) { + /* Get the currently running thread. */ + KThread *thread = target->GetRunningThread(i); + + /* Check that the thread's idle count is correct. */ + if (target->GetRunningThreadIdleCount(i) == Kernel::GetScheduler(i).GetIdleCount()) { + if (thread != nullptr && static_cast(thread->GetActiveCore()) == i) { + thread_ids[i] = thread->GetId(); + } else { + /* We found an unknown thread. */ + thread_ids[i] = ThreadIdUnknownThread; + } + } else { + /* We didn't find a thread. */ + thread_ids[i] = ThreadIdNoThread; + } + } + + /* Suspend all the threads in the process. */ + { + auto end = target->GetThreadList().end(); + for (auto it = target->GetThreadList().begin(); it != end; ++it) { + /* Request that we suspend the thread. */ + it->RequestSuspend(KThread::SuspendType_Debug); + } + } + + /* Send an exception event to represent our breaking the process. */ + static_assert(util::size(thread_ids) >= 4); + this->PushDebugEvent(ams::svc::DebugEvent_Exception, ams::svc::DebugException_DebuggerBreak, thread_ids[0], thread_ids[1], thread_ids[2], thread_ids[3]); + + /* Signal. */ + this->NotifyAvailable(); + + /* Set the process as breaked. */ + target->SetDebugBreak(); + + return ResultSuccess(); + } + + Result KDebugBase::TerminateProcess() { + /* Get the attached process. If we don't have one, we have nothing to do. */ + KScopedAutoObject target = this->GetProcess(); + R_SUCCEED_IF(target.IsNull()); + + /* Detach from the process. */ + { + /* Lock both ourselves and the target process. */ + KScopedLightLock state_lk(target->GetStateLock()); + KScopedLightLock list_lk(target->GetListLock()); + KScopedLightLock this_lk(this->lock); + + /* Check that we still have our process. */ + if (this->process != nullptr) { + /* Check that our process is the one we got earlier. */ + MESOSPHERE_ASSERT(this->process == target.GetPointerUnsafe()); + + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Get the process's state. */ + const KProcess::State state = target->GetState(); + + /* Check that the process is in a state where we can terminate it. */ + R_UNLESS(state != KProcess::State_Created, svc::ResultInvalidState()); + R_UNLESS(state != KProcess::State_CreatedAttached, svc::ResultInvalidState()); + + /* Decide on a new state for the process. */ + KProcess::State new_state; + if (state == KProcess::State_RunningAttached) { + /* If the process is running, transition it accordingly. */ + new_state = KProcess::State_Running; + } else if (state == KProcess::State_DebugBreak) { + /* If the process is debug breaked, transition it accordingly. */ + new_state = KProcess::State_Crashed; + } else { + /* Otherwise, don't transition. */ + new_state = state; + } + + /* Detach from the process. */ + target->ClearDebugObject(new_state); + this->process = nullptr; + + /* Clear our continue flags. */ + this->continue_flags = 0; + } + } + + /* Close the reference we held to the process while we were attached to it. */ + target->Close(); + + /* Terminate the process. */ + target->Terminate(); + + return ResultSuccess(); + } + + Result KDebugBase::GetThreadContext(ams::svc::ThreadContext *out, u64 thread_id, u32 context_flags) { + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Get the thread from its id. 
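The id is resolved to a thread object, which is validated below: it must belong to the debugged process, must not be terminated, and must not be the calling thread.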
*/ + KScopedAutoObject<KThread> thread = KThread::GetThreadFromId(thread_id); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidId()); + + /* Verify that the thread is owned by our process. */ + R_UNLESS(this->process == thread->GetOwnerProcess(), svc::ResultInvalidId()); + + /* Verify that the thread isn't terminated. */ + R_UNLESS(thread->GetState() != KThread::ThreadState_Terminated, svc::ResultTerminationRequested()); + + /* Check that the thread is not the current one. */ + /* NOTE: Nintendo does not check this, and thus the following loop will deadlock. */ + R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(), svc::ResultInvalidId()); + + /* Try to get the thread context until the thread isn't current on any core. */ + while (true) { + KScopedSchedulerLock sl; + + /* The thread needs to be requested for debug suspension. */ + R_UNLESS(thread->IsSuspendRequested(KThread::SuspendType_Debug), svc::ResultInvalidState()); + + /* If the thread's raw state isn't runnable, check if it's current on some core. */ + if (thread->GetRawState() != KThread::ThreadState_Runnable) { + bool current = false; + for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) { + if (thread.GetPointerUnsafe() == Kernel::GetCurrentContext(i).current_thread) { + current = true; + break; + } + } + + /* If the thread is current, retry until it isn't. */ + if (current) { + continue; + } + } + + /* Get the thread context. */ + return this->GetThreadContextImpl(out, thread.GetPointerUnsafe(), context_flags); + } + } + + Result KDebugBase::SetThreadContext(const ams::svc::ThreadContext &ctx, u64 thread_id, u32 context_flags) { + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Get the thread from its id. */ + KScopedAutoObject<KThread> thread = KThread::GetThreadFromId(thread_id); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidId()); + + /* Verify that the thread is owned by our process. */ + R_UNLESS(this->process == thread->GetOwnerProcess(), svc::ResultInvalidId()); + + /* Verify that the thread isn't terminated. */ + R_UNLESS(thread->GetState() != KThread::ThreadState_Terminated, svc::ResultTerminationRequested()); + + /* Check that the thread is not the current one. */ + /* NOTE: Nintendo does not check this, and thus the following loop will deadlock. */ + R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(), svc::ResultInvalidId()); + + /* Try to get the thread context until the thread isn't current on any core. */ + while (true) { + KScopedSchedulerLock sl; + + /* The thread needs to be requested for debug suspension. */ + R_UNLESS(thread->IsSuspendRequested(KThread::SuspendType_Debug), svc::ResultInvalidState()); + + /* If the thread's raw state isn't runnable, check if it's current on some core. */ + if (thread->GetRawState() != KThread::ThreadState_Runnable) { + bool current = false; + for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) { + if (thread.GetPointerUnsafe() == Kernel::GetCurrentContext(i).current_thread) { + current = true; + break; + } + } + + /* If the thread is current, retry until it isn't. */ + if (current) { + continue; + } + } + + /* Verify that the thread's svc state is valid. */ + if (thread->IsCallingSvc()) { + R_UNLESS(thread->GetSvcId() != svc::SvcId_Break, svc::ResultInvalidState()); + R_UNLESS(thread->GetSvcId() != svc::SvcId_ReturnFromException, svc::ResultInvalidState()); + } + + /* Set the thread context. 
*/ + return this->SetThreadContextImpl(ctx, thread.GetPointerUnsafe(), context_flags); + } + } + + + Result KDebugBase::ContinueDebug(const u32 flags, const u64 *thread_ids, size_t num_thread_ids) { + /* Get the attached process. */ + KScopedAutoObject target = this->GetProcess(); + R_UNLESS(target.IsNotNull(), svc::ResultProcessTerminated()); + + /* Lock both ourselves, the target process, and the scheduler. */ + KScopedLightLock state_lk(target->GetStateLock()); + KScopedLightLock list_lk(target->GetListLock()); + KScopedLightLock this_lk(this->lock); + KScopedSchedulerLock sl; + + /* Check that we're still attached to the process, and that it's not terminated. */ + R_UNLESS(this->process == target.GetPointerUnsafe(), svc::ResultProcessTerminated()); + R_UNLESS(!target->IsTerminated(), svc::ResultProcessTerminated()); + + /* Check that we have no pending events. */ + R_UNLESS(this->event_info_list.empty(), svc::ResultBusy()); + + /* Clear the target's JIT debug info. */ + target->ClearJitDebugInfo(); + + /* Set our continue flags. */ + this->continue_flags = flags; + + /* Iterate over threads, continuing them as we should. */ + bool has_debug_break_thread = false; + { + /* Parse our flags. */ + const bool exception_handled = (this->continue_flags & ams::svc::ContinueFlag_ExceptionHandled) != 0; + const bool continue_all = (this->continue_flags & ams::svc::ContinueFlag_ContinueAll) != 0; + const bool continue_others = (this->continue_flags & ams::svc::ContinueFlag_ContinueOthers) != 0; + + /* Update each thread. */ + auto end = target->GetThreadList().end(); + for (auto it = target->GetThreadList().begin(); it != end; ++it) { + /* Determine if we should continue the thread. */ + bool should_continue; + { + if (continue_all) { + /* Continue all threads. */ + should_continue = true; + } else if (continue_others) { + /* Continue the thread if it doesn't match one of our target ids. */ + const u64 thread_id = it->GetId(); + should_continue = true; + for (size_t i = 0; i < num_thread_ids; ++i) { + if (thread_ids[i] == thread_id) { + should_continue = false; + break; + } + } + } else { + /* Continue the thread if it matches one of our target ids. */ + const u64 thread_id = it->GetId(); + should_continue = false; + for (size_t i = 0; i < num_thread_ids; ++i) { + if (thread_ids[i] == thread_id) { + should_continue = true; + break; + } + } + } + } + + /* Continue the thread if we should. */ + if (should_continue) { + if (exception_handled) { + it->SetDebugExceptionResult(svc::ResultStopProcessingException()); + } + it->Resume(KThread::SuspendType_Debug); + } + + /* If the thread has debug suspend requested, note so. */ + if (it->IsSuspendRequested(KThread::SuspendType_Debug)) { + has_debug_break_thread = true; + } + } + } + + /* Set the process's state. */ + if (has_debug_break_thread) { + target->SetDebugBreak(); + } else { + target->SetAttached(); + } + + return ResultSuccess(); + } + + KEventInfo *KDebugBase::CreateDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4, u64 cur_thread_id) { + /* Allocate a new event. */ + KEventInfo *info = KEventInfo::Allocate(); + + /* Populate the event info. */ + if (info != nullptr) { + /* Set common fields. */ + info->event = event; + info->thread_id = 0; + info->flags = ams::svc::DebugEventFlag_Stopped; + + /* Set event specific fields. */ + switch (event) { + case ams::svc::DebugEvent_CreateProcess: + { + /* ... 
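No event-specific data needs to be stored for this case; the create-process information is filled in from the process itself when the event is read out in GetDebugEventInfoImpl.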
*/ + } + break; + case ams::svc::DebugEvent_CreateThread: + { + /* Set the thread id. */ + info->thread_id = param0; + + /* Set the thread creation info. */ + info->info.create_thread.thread_id = param0; + info->info.create_thread.tls_address = param1; + info->info.create_thread.entrypoint = param2; + } + break; + case ams::svc::DebugEvent_ExitProcess: + { + /* Set the exit reason. */ + info->info.exit_process.reason = static_cast(param0); + + /* Clear the thread id and flags. */ + info->thread_id = 0; + info->flags = 0; + } + break; + case ams::svc::DebugEvent_ExitThread: + { + /* Set the thread id. */ + info->thread_id = param0; + + /* Set the exit reason. */ + info->info.exit_thread.reason = static_cast(param1); + } + break; + case ams::svc::DebugEvent_Exception: + { + /* Set the thread id. */ + info->thread_id = cur_thread_id; + + /* Set the exception type, and clear the count. */ + info->info.exception.exception_type = static_cast(param0); + info->info.exception.exception_data_count = 0; + switch (static_cast(param0)) { + case ams::svc::DebugException_UndefinedInstruction: + case ams::svc::DebugException_BreakPoint: + case ams::svc::DebugException_UndefinedSystemCall: + { + info->info.exception.exception_address = param1; + + info->info.exception.exception_data_count = 1; + info->info.exception.exception_data[0] = param2; + } + break; + case ams::svc::DebugException_DebuggerAttached: + { + info->thread_id = 0; + + info->info.exception.exception_address = 0; + } + break; + case ams::svc::DebugException_UserBreak: + { + info->info.exception.exception_address = param1; + + info->info.exception.exception_data_count = 3; + info->info.exception.exception_data[0] = param2; + info->info.exception.exception_data[1] = param3; + info->info.exception.exception_data[2] = param4; + } + break; + case ams::svc::DebugException_DebuggerBreak: + { + info->thread_id = 0; + + info->info.exception.exception_address = 0; + + info->info.exception.exception_data_count = 4; + info->info.exception.exception_data[0] = param1; + info->info.exception.exception_data[1] = param2; + info->info.exception.exception_data[2] = param3; + info->info.exception.exception_data[3] = param4; + } + break; + case ams::svc::DebugException_MemorySystemError: + { + info->info.exception.exception_address = 0; + } + break; + case ams::svc::DebugException_InstructionAbort: + case ams::svc::DebugException_DataAbort: + case ams::svc::DebugException_AlignmentFault: + default: + { + info->info.exception.exception_address = param1; + } + break; + } + } + break; + } + } + + return info; + } + + void KDebugBase::PushDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) { + /* Create and enqueue and event. */ + if (KEventInfo *new_info = CreateDebugEvent(event, param0, param1, param2, param3, param4, GetCurrentThread().GetId()); new_info != nullptr) { + this->EnqueueDebugEventInfo(new_info); + } + } + + void KDebugBase::EnqueueDebugEventInfo(KEventInfo *info) { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Push the event to the back of the list. */ + this->event_info_list.push_back(*info); + } + + + KScopedAutoObject KDebugBase::GetProcess() { + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + return this->process; + } + + template requires (std::same_as || std::same_as) + Result KDebugBase::GetDebugEventInfoImpl(T *out) { + /* Get the attached process. 
*/ + KScopedAutoObject process = this->GetProcess(); + R_UNLESS(process.IsNotNull(), svc::ResultProcessTerminated()); + + /* Pop an event info from our queue. */ + KEventInfo *info = nullptr; + { + KScopedSchedulerLock sl; + + /* Check that we have an event to dequeue. */ + R_UNLESS(!this->event_info_list.empty(), svc::ResultNoEvent()); + + /* Pop the event from the front of the queue. */ + info = std::addressof(this->event_info_list.front()); + this->event_info_list.pop_front(); + } + MESOSPHERE_ASSERT(info != nullptr); + + /* Free the event info once we're done with it. */ + ON_SCOPE_EXIT { KEventInfo::Free(info); }; + + /* Set common fields. */ + out->type = info->event; + out->thread_id = info->thread_id; + out->flags = info->flags; + + /* Set event specific fields. */ + switch (info->event) { + case ams::svc::DebugEvent_CreateProcess: + { + out->info.create_process.program_id = process->GetProgramId(); + out->info.create_process.process_id = process->GetId(); + out->info.create_process.flags = process->GetCreateProcessFlags(); + out->info.create_process.user_exception_context_address = GetInteger(process->GetProcessLocalRegionAddress()); + + std::memcpy(out->info.create_process.name, process->GetName(), sizeof(out->info.create_process.name)); + } + break; + case ams::svc::DebugEvent_CreateThread: + { + out->info.create_thread.thread_id = info->info.create_thread.thread_id; + out->info.create_thread.tls_address = info->info.create_thread.tls_address; + out->info.create_thread.entrypoint = info->info.create_thread.entrypoint; + } + break; + case ams::svc::DebugEvent_ExitProcess: + { + out->info.exit_process.reason = info->info.exit_process.reason; + } + break; + case ams::svc::DebugEvent_ExitThread: + { + out->info.exit_thread.reason = info->info.exit_thread.reason; + } + break; + case ams::svc::DebugEvent_Exception: + { + out->info.exception.type = info->info.exception.exception_type; + out->info.exception.address = info->info.exception.exception_address; + + switch (info->info.exception.exception_type) { + case ams::svc::DebugException_UndefinedInstruction: + { + MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1); + out->info.exception.specific.undefined_instruction.insn = info->info.exception.exception_data[0]; + } + break; + case ams::svc::DebugException_BreakPoint: + { + MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1); + out->info.exception.specific.break_point.type = static_cast(info->info.exception.exception_data[0]); + out->info.exception.specific.break_point.address = 0; + } + break; + case ams::svc::DebugException_UserBreak: + { + MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 3); + out->info.exception.specific.user_break.break_reason = static_cast(info->info.exception.exception_data[0]); + out->info.exception.specific.user_break.address = info->info.exception.exception_data[1]; + out->info.exception.specific.user_break.size = info->info.exception.exception_data[2]; + } + break; + case ams::svc::DebugException_DebuggerBreak: + { + MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 4); + out->info.exception.specific.debugger_break.active_thread_ids[0] = info->info.exception.exception_data[0]; + out->info.exception.specific.debugger_break.active_thread_ids[1] = info->info.exception.exception_data[1]; + out->info.exception.specific.debugger_break.active_thread_ids[2] = info->info.exception.exception_data[2]; + out->info.exception.specific.debugger_break.active_thread_ids[3] = info->info.exception.exception_data[3]; + } 
+ break; + case ams::svc::DebugException_UndefinedSystemCall: + { + MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1); + out->info.exception.specific.undefined_system_call.id = info->info.exception.exception_data[0]; + } + break; + default: + { + /* ... */ + } + break; + } + } + break; + } + + return ResultSuccess(); + } + + Result KDebugBase::GetDebugEventInfo(ams::svc::lp64::DebugEventInfo *out) { + return this->GetDebugEventInfoImpl(out); + } + + Result KDebugBase::GetDebugEventInfo(ams::svc::ilp32::DebugEventInfo *out) { + return this->GetDebugEventInfoImpl(out); + } + + void KDebugBase::OnFinalizeSynchronizationObject() { + /* Detach from our process, if we have one. */ + { + /* Get the attached process. */ + KScopedAutoObject process = this->GetProcess(); + + /* If the process isn't null, detach. */ + if (process.IsNotNull()) { + /* When we're done detaching, clear the reference we opened when we attached. */ + ON_SCOPE_EXIT { process->Close(); }; + + /* Detach. */ + { + /* Lock both ourselves and the target process. */ + KScopedLightLock state_lk(process->GetStateLock()); + KScopedLightLock list_lk(process->GetListLock()); + KScopedLightLock this_lk(this->lock); + + /* Ensure we finalize exactly once. */ + if (this->process != nullptr) { + MESOSPHERE_ASSERT(this->process == process.GetPointerUnsafe()); + { + KScopedSchedulerLock sl; + + /* Detach ourselves from the process. */ + process->ClearDebugObject(this->old_process_state); + + /* Release all threads. */ + const bool resume = (process->GetState() != KProcess::State_Crashed); + { + auto end = process->GetThreadList().end(); + for (auto it = process->GetThreadList().begin(); it != end; ++it) { + if (resume) { + /* If the process isn't crashed, resume threads. */ + it->Resume(KThread::SuspendType_Debug); + } else { + /* Otherwise, suspend them. */ + it->RequestSuspend(KThread::SuspendType_Debug); + } + } + } + + /* Clear our process. */ + this->process = nullptr; + } + } + } + } + } + + /* Free any pending events. */ + { + KScopedSchedulerLock sl; + + while (!this->event_info_list.empty()) { + KEventInfo *info = std::addressof(this->event_info_list.front()); + this->event_info_list.pop_front(); + KEventInfo::Free(info); + } + } + } + + bool KDebugBase::IsSignaled() const { + KScopedSchedulerLock sl; + + return (!this->event_info_list.empty()) || this->process == nullptr || this->process->IsTerminated(); + } + + Result KDebugBase::ProcessDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) { + /* Get the current process. */ + KProcess *process = GetCurrentProcessPointer(); + + /* If the event is CreateThread and we've already attached, there's nothing to do. */ + if (event == ams::svc::DebugEvent_CreateThread) { + R_SUCCEED_IF(GetCurrentThread().IsAttachedToDebugger()); + } + + while (true) { + /* Lock the process and the scheduler. */ + KScopedLightLock state_lk(process->GetStateLock()); + KScopedLightLock list_lk(process->GetListLock()); + KScopedSchedulerLock sl; + + /* If the current thread is terminating, we can't process an event. */ + R_SUCCEED_IF(GetCurrentThread().IsTerminationRequested()); + + /* Get the debug object. If we have none, there's nothing to process. */ + KDebugBase *debug = GetDebugObject(process); + R_SUCCEED_IF(debug == nullptr); + + /* If the event is an exception and we don't have exception events enabled, we can't handle the event. 
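In that case the thread's debug exception result is set to success and ResultNotHandled is returned, so the exception is handled as though no debugger were attached.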
*/ + if (event == ams::svc::DebugEvent_Exception && (debug->continue_flags & ams::svc::ContinueFlag_EnableExceptionEvent) == 0) { + GetCurrentThread().SetDebugExceptionResult(ResultSuccess()); + return svc::ResultNotHandled(); + } + + /* If the current thread is suspended, retry. */ + if (GetCurrentThread().IsSuspended()) { + continue; + } + + /* Suspend all the process's threads. */ + { + auto end = process->GetThreadList().end(); + for (auto it = process->GetThreadList().begin(); it != end; ++it) { + it->RequestSuspend(KThread::SuspendType_Debug); + } + } + + /* Push the event. */ + debug->PushDebugEvent(event, param0, param1, param2, param3, param4); + debug->NotifyAvailable(); + + /* Set the process as breaked. */ + process->SetDebugBreak(); + + /* If the event is an exception, set the result. */ + if (event == ams::svc::DebugEvent_Exception) { + GetCurrentThread().SetDebugExceptionResult(ResultSuccess()); + } + + /* Exit our retry loop. */ + break; + } + + /* If the event is an exception, get the exception result. */ + if (event == ams::svc::DebugEvent_Exception) { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* If the thread is terminating, we can't process the exception. */ + R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultStopProcessingException()); + + /* Get the debug object. */ + if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) { + /* If we have one, check the debug exception. */ + return GetCurrentThread().GetDebugExceptionResult(); + } else { + /* We don't have a debug object, so stop processing the exception. */ + return svc::ResultStopProcessingException(); + } + } + + return ResultSuccess(); + } + + Result KDebugBase::OnDebugEvent(ams::svc::DebugEvent event, uintptr_t param0, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) { + if (KProcess *process = GetCurrentProcessPointer(); process != nullptr && process->IsAttachedToDebugger()) { + return ProcessDebugEvent(event, param0, param1, param2, param3, param4); + } + return ResultSuccess(); + } + + Result KDebugBase::OnExitProcess(KProcess *process) { + MESOSPHERE_ASSERT(process != nullptr); + + /* Check if we're attached to a debugger. */ + if (process->IsAttachedToDebugger()) { + /* If we are, lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Push the event. */ + if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) { + debug->PushDebugEvent(ams::svc::DebugEvent_ExitProcess, ams::svc::ProcessExitReason_ExitProcess); + debug->NotifyAvailable(); + } + } + + return ResultSuccess(); + } + + Result KDebugBase::OnTerminateProcess(KProcess *process) { + MESOSPHERE_ASSERT(process != nullptr); + + /* Check if we're attached to a debugger. */ + if (process->IsAttachedToDebugger()) { + /* If we are, lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Push the event. */ + if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) { + debug->PushDebugEvent(ams::svc::DebugEvent_ExitProcess, ams::svc::ProcessExitReason_TerminateProcess); + debug->NotifyAvailable(); + } + } + + return ResultSuccess(); + } + + Result KDebugBase::OnExitThread(KThread *thread) { + MESOSPHERE_ASSERT(thread != nullptr); + + /* Check if we're attached to a debugger. */ + if (KProcess *process = thread->GetOwnerProcess(); process != nullptr && process->IsAttachedToDebugger()) { + /* If we are, submit the event. */ + R_TRY(OnDebugEvent(ams::svc::DebugEvent_ExitThread, thread->GetId(), thread->IsTerminationRequested() ? 
ams::svc::ThreadExitReason_TerminateThread : ams::svc::ThreadExitReason_ExitThread)); + } + + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_device_address_space.cpp b/libraries/libmesosphere/source/kern_k_device_address_space.cpp index ffe27b765..b53ff2611 100644 --- a/libraries/libmesosphere/source/kern_k_device_address_space.cpp +++ b/libraries/libmesosphere/source/kern_k_device_address_space.cpp @@ -17,9 +17,114 @@ namespace ams::kern { + /* Static initializer. */ void KDeviceAddressSpace::Initialize() { /* This just forwards to the device page table manager. */ KDevicePageTable::Initialize(); } + /* Member functions. */ + Result KDeviceAddressSpace::Initialize(u64 address, u64 size) { + MESOSPHERE_ASSERT_THIS(); + + /* Initialize the device page table. */ + R_TRY(this->table.Initialize(address, size)); + + /* Set member variables. */ + this->space_address = address; + this->space_size = size; + this->is_initialized = true; + + return ResultSuccess(); + } + + void KDeviceAddressSpace::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + /* Finalize the table. */ + this->table.Finalize(); + + /* Finalize base. */ + KAutoObjectWithSlabHeapAndContainer::Finalize(); + } + + Result KDeviceAddressSpace::Attach(ams::svc::DeviceName device_name) { + /* Lock the address space. */ + KScopedLightLock lk(this->lock); + + /* Attach. */ + return this->table.Attach(device_name, this->space_address, this->space_size); + } + + Result KDeviceAddressSpace::Detach(ams::svc::DeviceName device_name) { + /* Lock the address space. */ + KScopedLightLock lk(this->lock); + + /* Detach. */ + return this->table.Detach(device_name); + } + + Result KDeviceAddressSpace::Map(size_t *out_mapped_size, KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool refresh_mappings) { + /* Check that the address falls within the space. */ + R_UNLESS((this->space_address <= device_address && device_address + size - 1 <= this->space_address + this->space_size - 1), svc::ResultInvalidCurrentMemory()); + + /* Lock the address space. */ + KScopedLightLock lk(this->lock); + + /* Lock the pages. */ + KPageGroup pg(page_table->GetBlockInfoManager()); + R_TRY(page_table->LockForDeviceAddressSpace(std::addressof(pg), process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned)); + + /* Close the pages we opened when we're done with them. */ + ON_SCOPE_EXIT { pg.Close(); }; + + /* Ensure that if we fail, we don't keep unmapped pages locked. */ + ON_SCOPE_EXIT { + if (*out_mapped_size != size) { + page_table->UnlockForDeviceAddressSpace(process_address + *out_mapped_size, size - *out_mapped_size); + }; + }; + + /* Map the pages. */ + { + /* Clear the output size to zero on failure. */ + auto map_guard = SCOPE_GUARD { *out_mapped_size = 0; }; + + /* Perform the mapping. */ + R_TRY(this->table.Map(out_mapped_size, pg, device_address, device_perm, refresh_mappings)); + + /* We succeeded, so cancel our guard. */ + map_guard.Cancel(); + } + + + return ResultSuccess(); + } + + Result KDeviceAddressSpace::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address) { + /* Check that the address falls within the space. */ + R_UNLESS((this->space_address <= device_address && device_address + size - 1 <= this->space_address + this->space_size - 1), svc::ResultInvalidCurrentMemory()); + + /* Lock the address space. 
*/ + KScopedLightLock lk(this->lock); + + /* Make and open a page group for the unmapped region. */ + KPageGroup pg(page_table->GetBlockInfoManager()); + R_TRY(page_table->MakeAndOpenPageGroupContiguous(std::addressof(pg), process_address, size / PageSize, + KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap, + KMemoryPermission_None, KMemoryPermission_None, + KMemoryAttribute_AnyLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared)); + + /* Ensure the page group is closed on scope exit. */ + ON_SCOPE_EXIT { pg.Close(); }; + + /* Unmap. */ + R_TRY(this->table.Unmap(pg, device_address)); + + /* Unlock the pages. */ + R_TRY(page_table->UnlockForDeviceAddressSpace(process_address, size)); + + return ResultSuccess(); + } + } diff --git a/libraries/libmesosphere/source/kern_k_dpc_manager.cpp b/libraries/libmesosphere/source/kern_k_dpc_manager.cpp index bef4397aa..1113d5590 100644 --- a/libraries/libmesosphere/source/kern_k_dpc_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_dpc_manager.cpp @@ -126,9 +126,9 @@ namespace ams::kern { /* Launch the new thread. */ g_preemption_priorities[core_id] = priority; if (core_id == cpu::NumCores - 1) { - MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerNormalThreadFunction, 0, DpcManagerThreadPriority, core_id)); - } else { MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerPreemptionThreadFunction, 0, DpcManagerThreadPriority, core_id)); + } else { + MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerNormalThreadFunction, 0, DpcManagerThreadPriority, core_id)); } /* Register the new thread. */ diff --git a/libraries/libmesosphere/source/kern_k_event.cpp b/libraries/libmesosphere/source/kern_k_event.cpp new file mode 100644 index 000000000..08dbb3b87 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_event.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void KEvent::Initialize() { + MESOSPHERE_ASSERT_THIS(); + + /* Increment reference count. */ + /* Because reference count is one on creation, this will result */ + /* in a reference count of two. Thus, when both readable and */ + /* writable events are closed this object will be destroyed. */ + this->Open(); + + /* Create our sub events. */ + KAutoObject::Create(std::addressof(this->readable_event)); + KAutoObject::Create(std::addressof(this->writable_event)); + + /* Initialize our sub sessions. */ + this->readable_event.Initialize(this); + this->writable_event.Initialize(this); + + /* Set our owner process. */ + this->owner = GetCurrentProcessPointer(); + this->owner->Open(); + + /* Mark initialized. 
*/ + this->initialized = true; + } + + void KEvent::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + KAutoObjectWithSlabHeapAndContainer::Finalize(); + } + + void KEvent::PostDestroy(uintptr_t arg) { + /* Release the event count resource the owner process holds. */ + KProcess *owner = reinterpret_cast(arg); + owner->ReleaseResource(ams::svc::LimitableResource_EventCountMax, 1); + owner->Close(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp index f5278d0e1..09a65b769 100644 --- a/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp +++ b/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp @@ -106,7 +106,7 @@ namespace ams::kern { const uintptr_t start_address = rx_address; const uintptr_t end_address = bss_size > 0 ? bss_address + bss_size : rw_address + rw_size; const size_t as_width = this->Is64BitAddressSpace() ? 39 : 32; - const ASType as_type = this->Is64BitAddressSpace() ? KAddressSpaceInfo::Type_Large64Bit : KAddressSpaceInfo::Type_32Bit; + const ASType as_type = this->Is64BitAddressSpace() ? KAddressSpaceInfo::Type_Map39Bit : KAddressSpaceInfo::Type_MapSmall; const uintptr_t map_start = KAddressSpaceInfo::GetAddressSpaceStart(as_width, as_type); const size_t map_size = KAddressSpaceInfo::GetAddressSpaceSize(as_width, as_type); const uintptr_t map_end = map_start + map_size; diff --git a/libraries/libmesosphere/source/kern_k_interrupt_event.cpp b/libraries/libmesosphere/source/kern_k_interrupt_event.cpp new file mode 100644 index 000000000..b8e24f699 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_interrupt_event.cpp @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + constinit KLightLock g_interrupt_event_lock; + constinit KInterruptEventTask *g_interrupt_event_task_table[KInterruptController::NumInterrupts] = {}; + + } + + Result KInterruptEvent::Initialize(int32_t interrupt_name, ams::svc::InterruptType type) { + MESOSPHERE_ASSERT_THIS(); + + /* Set interrupt id. */ + this->interrupt_id = interrupt_name; + + /* Initialize readable event base. */ + KReadableEvent::Initialize(nullptr); + + /* Try to register the task. */ + R_TRY(KInterruptEventTask::Register(std::addressof(this->task), this->interrupt_id, type == ams::svc::InterruptType_Level, this)); + + /* Mark initialized. */ + this->is_initialized = true; + return ResultSuccess(); + } + + void KInterruptEvent::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + MESOSPHERE_ASSERT(this->task != nullptr); + this->task->Unregister(); + } + + Result KInterruptEvent::Reset() { + MESOSPHERE_ASSERT_THIS(); + + /* Lock the task table. */ + KScopedLightLock lk(g_interrupt_event_lock); + + /* Clear the event. */ + R_TRY(KReadableEvent::Reset()); + + /* Clear the interrupt. 
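Clearing the pending interrupt at the interrupt controller allows a subsequent occurrence to be delivered and signal the event again.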
*/ + Kernel::GetInterruptManager().ClearInterrupt(this->interrupt_id); + + return ResultSuccess(); + } + + Result KInterruptEventTask::Register(KInterruptEventTask **out, s32 interrupt_id, bool level, KInterruptEvent *event) { + /* Verify the interrupt id is defined and global. */ + R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(interrupt_id), svc::ResultOutOfRange()); + R_UNLESS(Kernel::GetInterruptManager().IsGlobal(interrupt_id), svc::ResultOutOfRange()); + + /* Lock the task table. */ + KScopedLightLock lk(g_interrupt_event_lock); + + /* Get a task for the id. */ + bool allocated = false; + KInterruptEventTask *task = g_interrupt_event_task_table[interrupt_id]; + if (task != nullptr) { + /* Check that there's not already an event for this task. */ + R_UNLESS(task->event == nullptr, svc::ResultBusy()); + } else { + /* Allocate a new task. */ + task = KInterruptEventTask::Allocate(); + R_UNLESS(task != nullptr, svc::ResultOutOfResource()); + + allocated = true; + } + + /* Register/bind the interrupt task. */ + { + /* Ensure that the task is cleaned up if anything goes wrong. */ + auto task_guard = SCOPE_GUARD { if (allocated) { KInterruptEventTask::Free(task); } }; + + /* Bind the interrupt handler. */ + R_TRY(Kernel::GetInterruptManager().BindHandler(task, interrupt_id, GetCurrentCoreId(), KInterruptController::PriorityLevel_High, true, level)); + + /* We successfully registered, so we don't need to free the task. */ + task_guard.Cancel(); + } + + /* Set the event. */ + task->event = event; + + /* If we allocated, set the event in the table. */ + if (allocated) { + task->interrupt_id = interrupt_id; + g_interrupt_event_task_table[interrupt_id] = task; + } + + /* Set the output. */ + *out = task; + return ResultSuccess(); + } + + void KInterruptEventTask::Unregister() { + MESOSPHERE_ASSERT_THIS(); + + /* Lock the task table. */ + KScopedLightLock lk(g_interrupt_event_lock); + + /* Ensure we can unregister. */ + MESOSPHERE_ABORT_UNLESS(g_interrupt_event_task_table[this->interrupt_id] == this); + MESOSPHERE_ABORT_UNLESS(this->event != nullptr); + this->event = nullptr; + + /* Unbind the interrupt. */ + Kernel::GetInterruptManager().UnbindHandler(this->interrupt_id, GetCurrentCoreId()); + } + + KInterruptTask *KInterruptEventTask::OnInterrupt(s32 interrupt_id) { + MESOSPHERE_ASSERT_THIS(); + + return this; + } + + void KInterruptEventTask::DoTask() { + MESOSPHERE_ASSERT_THIS(); + + /* Lock the task table. */ + KScopedLightLock lk(g_interrupt_event_lock); + + if (this->event != nullptr) { + this->event->Signal(); + } + } +} diff --git a/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp index 83722e1b4..d553feb8f 100644 --- a/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp @@ -18,9 +18,9 @@ namespace ams::kern { void KInterruptTaskManager::TaskQueue::Enqueue(KInterruptTask *task) { - MESOSPHERE_ASSERT(task->GetNextTask() == nullptr); MESOSPHERE_ASSERT(task != this->head); MESOSPHERE_ASSERT(task != this->tail); + MESOSPHERE_AUDIT(task->GetNextTask() == nullptr); /* Insert the task into the queue. */ if (this->tail != nullptr) { @@ -30,19 +30,33 @@ namespace ams::kern { } this->tail = task; + + /* Set the next task for auditing. 
*/ + #if defined (MESOSPHERE_BUILD_FOR_AUDITING) + task->SetNextTask(GetDummyInterruptTask()); + #endif } void KInterruptTaskManager::TaskQueue::Dequeue() { MESOSPHERE_ASSERT(this->head != nullptr); MESOSPHERE_ASSERT(this->tail != nullptr); + MESOSPHERE_AUDIT(this->tail->GetNextTask() == GetDummyInterruptTask()); /* Pop the task from the front of the queue. */ + KInterruptTask *old_head = this->head; + if (this->head == this->tail) { this->head = nullptr; this->tail = nullptr; } else { this->head = this->head->GetNextTask(); } + + #if defined (MESOSPHERE_BUILD_FOR_AUDITING) + old_head->SetNextTask(nullptr); + #else + AMS_UNUSED(old_head); + #endif } void KInterruptTaskManager::ThreadFunction(uintptr_t arg) { @@ -91,7 +105,7 @@ namespace ams::kern { /* Enqueue the task and signal the scheduler. */ this->task_queue.Enqueue(task); - Kernel::GetScheduler().SetInterruptTaskThreadRunnable(); + Kernel::GetScheduler().SetInterruptTaskRunnable(); } } diff --git a/libraries/libmesosphere/source/kern_k_light_client_session.cpp b/libraries/libmesosphere/source/kern_k_light_client_session.cpp new file mode 100644 index 000000000..deacc8ee0 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_light_client_session.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void KLightClientSession::Destroy() { + MESOSPHERE_ASSERT_THIS(); + + this->parent->OnClientClosed(); + } + + void KLightClientSession::OnServerClosed() { + MESOSPHERE_ASSERT_THIS(); + } + + Result KLightClientSession::SendSyncRequest(u32 *data) { + MESOSPHERE_ASSERT_THIS(); + + /* Get the request thread. */ + KThread *cur_thread = GetCurrentThreadPointer(); + + /* Set the light data. */ + cur_thread->SetLightSessionData(data); + + /* Send the request. */ + { + KScopedSchedulerLock sl; + + cur_thread->SetSyncedObject(nullptr, ResultSuccess()); + + R_TRY(this->parent->OnRequest(cur_thread)); + } + + /* Get the result. */ + KSynchronizationObject *dummy; + return cur_thread->GetWaitResult(std::addressof(dummy)); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_light_lock.cpp b/libraries/libmesosphere/source/kern_k_light_lock.cpp index 9f91d67bf..e0a317539 100644 --- a/libraries/libmesosphere/source/kern_k_light_lock.cpp +++ b/libraries/libmesosphere/source/kern_k_light_lock.cpp @@ -38,6 +38,7 @@ namespace ams::kern { if (AMS_LIKELY(cur_thread->GetState() == KThread::ThreadState_Runnable)) { cur_thread->SetState(KThread::ThreadState_Waiting); } + if (owner_thread->IsSuspended()) { owner_thread->ContinueIfHasKernelWaiters(); } @@ -75,6 +76,10 @@ namespace ams::kern { if (AMS_LIKELY(next_owner->GetState() == KThread::ThreadState_Waiting)) { next_owner->SetState(KThread::ThreadState_Runnable); } + + if (next_owner->IsSuspended()) { + next_owner->ContinueIfHasKernelWaiters(); + } } /* We may have unsuspended in the process of acquiring the lock, so we'll re-suspend now if so. 
*/ diff --git a/libraries/libmesosphere/source/kern_k_light_server_session.cpp b/libraries/libmesosphere/source/kern_k_light_server_session.cpp new file mode 100644 index 000000000..d049fee48 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_light_server_session.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void KLightServerSession::Destroy() { + MESOSPHERE_ASSERT_THIS(); + + this->CleanupRequests(); + + this->parent->OnServerClosed(); + } + + void KLightServerSession::OnClientClosed() { + MESOSPHERE_ASSERT_THIS(); + + this->CleanupRequests(); + } + + Result KLightServerSession::OnRequest(KThread *request_thread) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Check that the server isn't closed. */ + R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed()); + + /* Try to sleep the thread. */ + R_UNLESS(this->request_queue.SleepThread(request_thread), svc::ResultTerminationRequested()); + + /* If we don't have a current request, wake up a server thread to handle it. */ + if (this->current_request == nullptr) { + this->server_queue.WakeupFrontThread(); + } + + return ResultSuccess(); + } + + Result KLightServerSession::ReplyAndReceive(u32 *data) { + MESOSPHERE_ASSERT_THIS(); + + /* Set the server context. */ + KThread *server_thread = GetCurrentThreadPointer(); + server_thread->SetLightSessionData(data); + + /* Reply, if we need to. */ + KThread *cur_request = nullptr; + if (data[0] & KLightSession::ReplyFlag) { + KScopedSchedulerLock sl; + + /* Check that we're open. */ + R_UNLESS(!this->parent->IsClientClosed(), svc::ResultSessionClosed()); + R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed()); + + /* Check that we have a request to reply to. */ + R_UNLESS(this->current_request != nullptr, svc::ResultInvalidState()); + + /* Check that the server thread is correct. */ + R_UNLESS(this->server_thread == server_thread, svc::ResultInvalidState()); + + /* If we can reply, do so. */ + if (!this->current_request->IsTerminationRequested()) { + MESOSPHERE_ASSERT(this->current_request->GetState() == KThread::ThreadState_Waiting); + MESOSPHERE_ASSERT(this->current_request == this->request_queue.GetFront()); + std::memcpy(this->current_request->GetLightSessionData(), server_thread->GetLightSessionData(), KLightSession::DataSize); + this->request_queue.WakeupThread(this->current_request); + } + + /* Clear our current request. */ + cur_request = this->current_request; + this->current_request = nullptr; + this->server_thread = nullptr; + } + + /* Close the current request, if we had one. */ + if (cur_request != nullptr) { + cur_request->Close(); + } + + /* Receive. */ + bool set_cancellable = false; + while (true) { + KScopedSchedulerLock sl; + + /* Check that we aren't already receiving. 
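Only one server thread may receive on a light session at a time, so the server queue must be empty and no server thread may already be recorded.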
*/ + R_UNLESS(this->server_queue.IsEmpty(), svc::ResultInvalidState()); + R_UNLESS(this->server_thread == nullptr, svc::ResultInvalidState()); + + /* If we cancelled in a previous loop, clear cancel state. */ + if (set_cancellable) { + server_thread->ClearCancellable(); + set_cancellable = false; + } + + /* Check that we're open. */ + R_UNLESS(!this->parent->IsClientClosed(), svc::ResultSessionClosed()); + R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed()); + + /* If we have a request available, use it. */ + if (this->current_request == nullptr && !this->request_queue.IsEmpty()) { + this->current_request = this->request_queue.GetFront(); + this->current_request->Open(); + this->server_thread = server_thread; + break; + } else { + /* Otherwise, wait for a request to come in. */ + R_UNLESS(this->server_queue.SleepThread(server_thread), svc::ResultTerminationRequested()); + + /* Check if we were cancelled. */ + if (server_thread->IsWaitCancelled()) { + this->server_queue.WakeupThread(server_thread); + server_thread->ClearWaitCancelled(); + return svc::ResultCancelled(); + } + + /* Otherwise, mark as cancellable. */ + server_thread->SetCancellable(); + set_cancellable = true; + } + } + + /* Copy the client data. */ + std::memcpy(server_thread->GetLightSessionData(), this->current_request->GetLightSessionData(), KLightSession::DataSize); + return ResultSuccess(); + } + + void KLightServerSession::CleanupRequests() { + /* Cleanup all pending requests. */ + KThread *cur_request = nullptr; + { + KScopedSchedulerLock sl; + + /* Handle the current request. */ + if (this->current_request != nullptr) { + /* Reply to the current request. */ + if (!this->current_request->IsTerminationRequested()) { + MESOSPHERE_ASSERT(this->current_request->GetState() == KThread::ThreadState_Waiting); + MESOSPHERE_ASSERT(this->current_request == this->request_queue.GetFront()); + this->request_queue.WakeupThread(this->current_request); + this->current_request->SetSyncedObject(nullptr, svc::ResultSessionClosed()); + } + + /* Clear our current request. */ + cur_request = this->current_request; + this->current_request = nullptr; + this->server_thread = nullptr; + } + + /* Reply to all other requests. */ + while (!this->request_queue.IsEmpty()) { + KThread *client_thread = this->request_queue.WakeupFrontThread(); + client_thread->SetSyncedObject(nullptr, svc::ResultSessionClosed()); + } + + /* Wake up all server threads. */ + while (!this->server_queue.IsEmpty()) { + this->server_queue.WakeupFrontThread(); + } + } + + /* Close the current request, if we had one. */ + if (cur_request != nullptr) { + cur_request->Close(); + } + } + +} diff --git a/libraries/libmesosphere/source/kern_k_light_session.cpp b/libraries/libmesosphere/source/kern_k_light_session.cpp new file mode 100644 index 000000000..6132d09b7 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_light_session.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern { + + void KLightSession::Initialize(KClientPort *client_port, uintptr_t name) { + MESOSPHERE_ASSERT_THIS(); + + /* Increment reference count. */ + /* Because reference count is one on creation, this will result */ + /* in a reference count of two. Thus, when both server and client are closed */ + /* this object will be destroyed. */ + this->Open(); + + /* Create our sub sessions. */ + KAutoObject::Create(std::addressof(this->server)); + KAutoObject::Create(std::addressof(this->client)); + + /* Initialize our sub sessions. */ + this->server.Initialize(this); + this->client.Initialize(this); + + /* Set state and name. */ + this->state = State::Normal; + this->name = name; + + /* Set our owner process. */ + this->process = GetCurrentProcessPointer(); + this->process->Open(); + + /* Set our port. */ + this->port = client_port; + if (this->port != nullptr) { + this->port->Open(); + } + + /* Mark initialized. */ + this->initialized = true; + } + + void KLightSession::Finalize() { + if (this->port != nullptr) { + this->port->OnSessionFinalized(); + this->port->Close(); + } + } + + void KLightSession::OnServerClosed() { + MESOSPHERE_ASSERT_THIS(); + + if (this->state == State::Normal) { + this->state = State::ServerClosed; + this->client.OnServerClosed(); + } + + this->Close(); + } + + void KLightSession::OnClientClosed() { + MESOSPHERE_ASSERT_THIS(); + + if (this->state == State::Normal) { + this->state = State::ClientClosed; + this->server.OnClientClosed(); + } + + this->Close(); + } + + void KLightSession::PostDestroy(uintptr_t arg) { + /* Release the session count resource the owner process holds. */ + KProcess *owner = reinterpret_cast(arg); + owner->ReleaseResource(ams::svc::LimitableResource_SessionCountMax, 1); + owner->Close(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp index ed8917cba..df93139db 100644 --- a/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp @@ -228,6 +228,87 @@ namespace ams::kern { } } + void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) { + /* Ensure for auditing that we never end up with an invalid tree. */ + KScopedMemoryBlockManagerAuditor auditor(this); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize)); + MESOSPHERE_ASSERT((attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0); + + KProcessAddress cur_address = address; + size_t remaining_pages = num_pages; + iterator it = this->FindIterator(address); + + while (remaining_pages > 0) { + const size_t remaining_size = remaining_pages * PageSize; + KMemoryInfo cur_info = it->GetMemoryInfo(); + if (it->HasProperties(test_state, test_perm, test_attr) && !it->HasProperties(state, perm, attr)) { + /* If we need to, create a new block before and insert it. */ + if (cur_info.GetAddress() != GetInteger(cur_address)) { + KMemoryBlock *new_block = allocator->Allocate(); + + it->Split(new_block, cur_address); + it = this->memory_block_tree.insert(*new_block); + it++; + + cur_info = it->GetMemoryInfo(); + cur_address = cur_info.GetAddress(); + } + + /* If we need to, create a new block after and insert it. 
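When the current block extends beyond the end of the range being updated, split it at cur_address + remaining_size so that only the in-range portion has its properties changed; the tail keeps its existing state.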
*/ + if (cur_info.GetSize() > remaining_size) { + KMemoryBlock *new_block = allocator->Allocate(); + + it->Split(new_block, cur_address + remaining_size); + it = this->memory_block_tree.insert(*new_block); + + cur_info = it->GetMemoryInfo(); + } + + /* Update block state. */ + it->Update(state, perm, attr); + cur_address += cur_info.GetSize(); + remaining_pages -= cur_info.GetNumPages(); + } else { + /* If we already have the right properties, just advance. */ + if (cur_address + remaining_size < cur_info.GetEndAddress()) { + remaining_pages = 0; + cur_address += remaining_size; + } else { + remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize; + cur_address = cur_info.GetEndAddress(); + } + } + it++; + } + + /* Find the iterator now that we've updated. */ + it = this->FindIterator(address); + if (address != this->start_address) { + it--; + } + + /* Coalesce blocks that we can. */ + while (true) { + iterator prev = it++; + if (it == this->memory_block_tree.end()) { + break; + } + + if (prev->HasSameProperties(*it)) { + KMemoryBlock *block = std::addressof(*it); + const size_t pages = it->GetNumPages(); + this->memory_block_tree.erase(it); + allocator->Free(block); + prev->Add(pages); + it = prev; + } + + if (address + num_pages * PageSize < it->GetMemoryInfo().GetEndAddress()) { + break; + } + } + } + void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, void (KMemoryBlock::*lock_func)(KMemoryPermission new_perm), KMemoryPermission perm) { /* Ensure for auditing that we never end up with an invalid tree. */ KScopedMemoryBlockManagerAuditor auditor(this); @@ -257,7 +338,9 @@ namespace ams::kern { /* If there's a previous, we should check for coalescing. */ check_coalesce_prev = true; prev--; - } else if (cur_info.GetSize() > remaining_size) { + } + + if (cur_info.GetSize() > remaining_size) { /* If we need to, create a new block after and insert it. */ KMemoryBlock *new_block = allocator->Allocate(); diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.cpp index 588c7cb79..e768f6af1 100644 --- a/libraries/libmesosphere/source/kern_k_memory_layout.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_layout.cpp @@ -144,7 +144,7 @@ namespace ams::kern { constexpr size_t CoreLocalRegionSize = PageSize * (1 + cpu::NumCores); constexpr size_t CoreLocalRegionSizeWithGuards = CoreLocalRegionSize + 2 * PageSize; constexpr size_t CoreLocalRegionBoundsAlign = 1_GB; - /* TODO: static_assert(CoreLocalRegionSize == sizeof(KCoreLocalRegion)); */ + static_assert(CoreLocalRegionSize == sizeof(KCoreLocalRegion)); KVirtualAddress GetCoreLocalRegionVirtualAddress() { while (true) { diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp index e5c58bd33..082c1db01 100644 --- a/libraries/libmesosphere/source/kern_k_memory_manager.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp @@ -87,6 +87,36 @@ namespace ams::kern { } } + Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) { + /* Lock the pool. */ + KScopedLightLock lk(this->pool_locks[pool]); + + /* Check that we don't already have an optimized process. */ + R_UNLESS(!this->has_optimized_process[pool], svc::ResultBusy()); + + /* Set the optimized process id. 
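Record which process owns optimized memory in this pool; the ResultBusy check above ensures at most one optimized process per pool at a time.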
*/ + this->optimized_process_ids[pool] = process_id; + this->has_optimized_process[pool] = true; + + /* Clear the management area for the optimized process. */ + for (auto *manager = this->GetFirstManager(pool, Direction_FromFront); manager != nullptr; manager = this->GetNextManager(manager, Direction_FromFront)) { + manager->InitializeOptimizedMemory(); + } + + return ResultSuccess(); + } + + void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) { + /* Lock the pool. */ + KScopedLightLock lk(this->pool_locks[pool]); + + /* If the process was optimized, clear it. */ + if (this->has_optimized_process[pool] && this->optimized_process_ids[pool] == process_id) { + this->has_optimized_process[pool] = false; + } + } + + KVirtualAddress KMemoryManager::AllocateContinuous(size_t num_pages, size_t align_pages, u32 option) { /* Early return if we're allocating no pages. */ if (num_pages == 0) { @@ -123,13 +153,13 @@ namespace ams::kern { /* Maintain the optimized memory bitmap, if we should. */ if (this->has_optimized_process[pool]) { - chosen_manager->TrackAllocationForOptimizedProcess(allocated_block, num_pages); + chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages); } return allocated_block; } - Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool optimize, bool random) { + Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random) { /* Choose a heap based on our page size request. */ const s32 heap_index = KPageHeap::GetBlockIndex(num_pages); R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory()); @@ -163,8 +193,8 @@ namespace ams::kern { } /* Maintain the optimized memory bitmap, if we should. */ - if (optimize) { - cur_manager->TrackAllocationForOptimizedProcess(allocated_block, pages_per_alloc); + if (unoptimized) { + cur_manager->TrackUnoptimizedAllocation(allocated_block, pages_per_alloc); } num_pages -= pages_per_alloc; @@ -196,10 +226,95 @@ namespace ams::kern { return this->AllocatePageGroupImpl(out, num_pages, pool, dir, this->has_optimized_process[pool], true); } + Result KMemoryManager::AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern) { + MESOSPHERE_ASSERT(out != nullptr); + MESOSPHERE_ASSERT(out->GetNumPages() == 0); + + /* Decode the option. */ + const auto [pool, dir] = DecodeOption(option); + + /* Allocate the memory. */ + bool has_optimized, is_optimized; + { + /* Lock the pool that we're allocating from. */ + KScopedLightLock lk(this->pool_locks[pool]); + + /* Check if we have an optimized process. */ + has_optimized = this->has_optimized_process[pool]; + is_optimized = this->optimized_process_ids[pool] == process_id; + + /* Allocate the page group. */ + R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false)); + } + + /* Perform optimized memory tracking, if we should. */ + if (has_optimized && is_optimized) { + /* Iterate over the allocated blocks. */ + for (const auto &block : *out) { + /* Get the block extents. */ + const KVirtualAddress block_address = block.GetAddress(); + const size_t block_pages = block.GetNumPages(); + + /* If it has no pages, we don't need to do anything. */ + if (block_pages == 0) { + continue; + } + + /* Fill all the pages that we need to fill. 
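ProcessOptimizedAllocation fills only pages that have not previously been handed to the optimized process, and reports via any_new whether any such pages were found so that the tracking update below can be skipped otherwise.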
*/ + bool any_new = false; + { + KVirtualAddress cur_address = block_address; + size_t cur_pages = block_pages; + while (cur_pages > 0) { + /* Get the manager for the current address. */ + auto &manager = this->GetManager(cur_address); + + /* Process part or all of the block. */ + const size_t processed_pages = manager.ProcessOptimizedAllocation(std::addressof(any_new), cur_address, cur_pages, fill_pattern); + + /* Advance. */ + cur_address += processed_pages * PageSize; + cur_pages -= processed_pages; + } + } + + /* If there are no new pages, move on to the next block. */ + if (!any_new) { + continue; + } + + /* Update tracking for the allocation. */ + KVirtualAddress cur_address = block_address; + size_t cur_pages = block_pages; + while (cur_pages > 0) { + /* Get the manager for the current address. */ + auto &manager = this->GetManager(cur_address); + + /* Lock the pool for the manager. */ + KScopedLightLock lk(this->pool_locks[manager.GetPool()]); + + /* Track some or all of the current pages. */ + const size_t processed_pages = manager.TrackOptimizedAllocation(cur_address, cur_pages); + + /* Advance. */ + cur_address += processed_pages * PageSize; + cur_pages -= processed_pages; + } + } + } else { + /* Set all the allocated memory. */ + for (const auto &block : *out) { + std::memset(GetVoidPointer(block.GetAddress()), fill_pattern, block.GetSize()); + } + } + + return ResultSuccess(); + } + size_t KMemoryManager::Impl::Initialize(const KMemoryRegion *region, Pool p, KVirtualAddress metadata, KVirtualAddress metadata_end) { /* Calculate metadata sizes. */ const size_t ref_count_size = (region->GetSize() / PageSize) * sizeof(u16); - const size_t optimize_map_size = (util::AlignUp((region->GetSize() / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64); + const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(region->GetSize()); const size_t manager_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize); const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region->GetSize()); const size_t total_metadata_size = manager_size + page_heap_size; @@ -225,7 +340,7 @@ namespace ams::kern { return total_metadata_size; } - void KMemoryManager::Impl::TrackAllocationForOptimizedProcess(KVirtualAddress block, size_t num_pages) { + void KMemoryManager::Impl::TrackUnoptimizedAllocation(KVirtualAddress block, size_t num_pages) { size_t offset = this->heap.GetPageOffset(block); const size_t last = offset + num_pages - 1; u64 *optimize_map = GetPointer(this->metadata_region); @@ -235,6 +350,57 @@ namespace ams::kern { } } + size_t KMemoryManager::Impl::TrackOptimizedAllocation(KVirtualAddress block, size_t num_pages) { + /* Get the number of tracking pages. */ + const size_t cur_pages = std::min(num_pages, this->heap.GetPageOffsetToEnd(block)); + + /* Get the range we're tracking. */ + size_t offset = this->heap.GetPageOffset(block); + const size_t last = offset + cur_pages - 1; + + /* Track. */ + u64 *optimize_map = GetPointer(this->metadata_region); + while (offset <= last) { + /* Mark the page as being optimized-allocated. */ + optimize_map[offset / BITSIZEOF(u64)] |= (u64(1) << (offset % BITSIZEOF(u64))); + + offset++; + } + + /* Return the number of pages we tracked. */ + return cur_pages; + } + + size_t KMemoryManager::Impl::ProcessOptimizedAllocation(bool *out_any_new, KVirtualAddress block, size_t num_pages, u8 fill_pattern) { + /* Get the number of processable pages. 
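A block may extend past the end of this manager's region, so clamp to the pages this manager actually owns; the caller advances and repeats with the next manager for any remainder.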
*/ + const size_t cur_pages = std::min(num_pages, this->heap.GetPageOffsetToEnd(block)); + + /* Clear any new. */ + *out_any_new = false; + + /* Get the range we're processing. */ + size_t offset = this->heap.GetPageOffset(block); + const size_t last = offset + cur_pages - 1; + + /* Process. */ + u64 *optimize_map = GetPointer(this->metadata_region); + while (offset <= last) { + /* Check if the page has been optimized-allocated before. */ + if ((optimize_map[offset / BITSIZEOF(u64)] & (u64(1) << (offset % BITSIZEOF(u64)))) == 0) { + /* If not, it's new. */ + *out_any_new = true; + + /* Fill the page. */ + std::memset(GetVoidPointer(this->heap.GetAddress() + offset * PageSize), fill_pattern, PageSize); + } + + offset++; + } + + /* Return the number of pages we processed. */ + return cur_pages; + } + size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) { const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64); diff --git a/libraries/libmesosphere/source/kern_k_page_table_base.cpp b/libraries/libmesosphere/source/kern_k_page_table_base.cpp index d7f5701dd..c489c0cca 100644 --- a/libraries/libmesosphere/source/kern_k_page_table_base.cpp +++ b/libraries/libmesosphere/source/kern_k_page_table_base.cpp @@ -40,7 +40,7 @@ namespace ams::kern { this->code_region_start = 0; this->code_region_end = 0; this->max_heap_size = 0; - this->max_physical_memory_size = 0; + this->mapped_physical_memory_size = 0; this->mapped_unsafe_physical_memory = 0; this->memory_block_slab_manager = std::addressof(Kernel::GetSystemMemoryBlockManager()); @@ -100,9 +100,9 @@ namespace ams::kern { alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias); heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap); stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Stack); - kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_32Bit); - this->code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Large64Bit); - this->code_region_end = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Large64Bit); + kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_MapSmall); + this->code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit); + this->code_region_end = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit); this->alias_code_region_start = this->code_region_start; this->alias_code_region_end = this->code_region_end; process_code_start = util::AlignDown(GetInteger(code_address), RegionAlignment); @@ -110,11 +110,11 @@ namespace ams::kern { } else { stack_region_size = 0; kernel_map_region_size = 0; - this->code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_32Bit); - this->code_region_end = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_32Bit); + this->code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall); + this->code_region_end = this->code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall); this->stack_region_start = this->code_region_start; this->alias_code_region_start = this->code_region_start; - this->alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_Small64Bit) + GetSpaceSize(KAddressSpaceInfo::Type_Small64Bit); + this->alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge); this->stack_region_end = this->code_region_end; this->kernel_map_region_start = 
this->code_region_start; this->kernel_map_region_end = this->code_region_end; @@ -225,7 +225,7 @@ namespace ams::kern { /* Set heap and fill members. */ this->current_heap_end = this->heap_region_start; this->max_heap_size = 0; - this->max_physical_memory_size = 0; + this->mapped_physical_memory_size = 0; this->mapped_unsafe_physical_memory = 0; const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled(); @@ -273,8 +273,16 @@ namespace ams::kern { void KPageTableBase::Finalize() { + /* Finalize memory blocks. */ this->memory_block_manager.Finalize(this->memory_block_slab_manager); - MESOSPHERE_TODO("cpu::InvalidateEntireInstructionCache();"); + + /* Free any unsafe mapped memory. */ + if (this->mapped_unsafe_physical_memory) { + Kernel::GetUnsafeMemory().Release(this->mapped_unsafe_physical_memory); + } + + /* Invalidate the entire instruction cache. */ + cpu::InvalidateEntireInstructionCache(); } KProcessAddress KPageTableBase::GetRegionAddress(KMemoryState state) const { @@ -388,8 +396,34 @@ namespace ams::kern { Result KPageTableBase::CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const { /* Validate the states match expectation. */ R_UNLESS((info.state & state_mask) == state, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.perm & perm_mask) == perm, svc::ResultInvalidCurrentMemory()); - R_UNLESS((info.attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory()); + R_UNLESS((info.perm & perm_mask) == perm, svc::ResultInvalidCurrentMemory()); + R_UNLESS((info.attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory()); + + return ResultSuccess(); + } + + Result KPageTableBase::CheckMemoryStateContiguous(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + /* Get information about the first block. */ + const KProcessAddress last_addr = addr + size - 1; + KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(addr); + KMemoryInfo info = it->GetMemoryInfo(); + + while (true) { + /* Validate against the provided masks. */ + R_TRY(this->CheckMemoryState(info, state_mask, state, perm_mask, perm, attr_mask, attr)); + + /* Break once we're done. */ + if (last_addr <= info.GetLastAddress()) { + break; + } + + /* Advance our iterator. */ + it++; + MESOSPHERE_ASSERT(it != this->memory_block_manager.cend()); + info = it->GetMemoryInfo(); + } return ResultSuccess(); } @@ -439,6 +473,113 @@ namespace ams::kern { return ResultSuccess(); } + Result KPageTableBase::LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr) { + /* Validate basic preconditions. */ + MESOSPHERE_ASSERT((lock_attr & attr) == 0); + MESOSPHERE_ASSERT((lock_attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0); + + /* Validate the lock request. */ + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check that the output page group is empty, if it exists. */ + if (out_pg) { + MESOSPHERE_ASSERT(out_pg->GetNumPages() == 0); + } + + /* Check the state. 
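The entire range must match the caller's state/permission/attribute masks and be reference counted; the old values are captured so the lock attribute and optional new permission can be applied on top of them below.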
*/ + KMemoryState old_state; + KMemoryPermission old_perm; + KMemoryAttribute old_attr; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), addr, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr)); + + /* Get the physical address, if we're supposed to. */ + if (out_paddr != nullptr) { + MESOSPHERE_ABORT_UNLESS(this->GetPhysicalAddress(out_paddr, addr)); + } + + /* Make the page group, if we're supposed to. */ + if (out_pg != nullptr) { + R_TRY(this->MakePageGroup(*out_pg, addr, num_pages)); + } + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* Decide on new perm and attr. */ + new_perm = (new_perm != KMemoryPermission_None) ? new_perm : old_perm; + KMemoryAttribute new_attr = static_cast(old_attr | lock_attr); + + /* Update permission, if we need to. */ + if (new_perm != old_perm) { + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + const KPageProperties properties = { new_perm, false, (old_attr & KMemoryAttribute_Uncached) != 0, true }; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null, false, properties, OperationType_ChangePermissions, false)); + } + + /* Apply the memory block updates. */ + this->memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr); + + /* If we have an output group, open. */ + if (out_pg) { + out_pg->Open(); + } + + return ResultSuccess(); + } + + Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg) { + /* Validate basic preconditions. */ + MESOSPHERE_ASSERT((attr_mask & lock_attr) == lock_attr); + MESOSPHERE_ASSERT((attr & lock_attr) == lock_attr); + + /* Validate the unlock request. */ + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check the state. */ + KMemoryState old_state; + KMemoryPermission old_perm; + KMemoryAttribute old_attr; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), addr, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr)); + + /* Check the page group. */ + if (pg != nullptr) { + R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), svc::ResultInvalidMemoryRegion()); + } + + /* Decide on new perm and attr. */ + new_perm = (new_perm != KMemoryPermission_None) ? new_perm : old_perm; + KMemoryAttribute new_attr = static_cast(old_attr & ~lock_attr); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* Update permission, if we need to. */ + if (new_perm != old_perm) { + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + const KPageProperties properties = { new_perm, false, (old_attr & KMemoryAttribute_Uncached) != 0, false }; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null, false, properties, OperationType_ChangePermissions, false)); + } + + /* Apply the memory block updates. 
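This records the restored permission together with the old attribute with the lock bits cleared.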
*/ + this->memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr); + + return ResultSuccess(); + } + Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const { MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); MESOSPHERE_ASSERT(out_info != nullptr); @@ -452,6 +593,385 @@ namespace ams::kern { return ResultSuccess(); } + Result KPageTableBase::QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, KMemoryState state) const { + MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(out != nullptr); + + const KProcessAddress region_start = this->GetRegionAddress(state); + const size_t region_size = this->GetRegionSize(state); + + /* Check that the address/size are potentially valid. */ + R_UNLESS((address < address + size), svc::ResultNotFound()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry cur_entry = {}; + bool cur_valid = false; + TraversalEntry next_entry; + bool next_valid; + size_t tot_size = 0; + + next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start); + next_entry.block_size = (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1))); + + /* Iterate, looking for entry. */ + while (true) { + if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { + cur_entry.block_size += next_entry.block_size; + } else { + if (cur_valid && cur_entry.phys_addr <= address && address + size <= cur_entry.phys_addr + cur_entry.block_size) { + /* Check if this region is valid. */ + const KProcessAddress mapped_address = (region_start + tot_size) + (address - cur_entry.phys_addr); + if (R_SUCCEEDED(this->CheckMemoryState(mapped_address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) { + /* It is! */ + *out = mapped_address; + return ResultSuccess(); + } + } + + /* Update tracking variables. */ + tot_size += cur_entry.block_size; + cur_entry = next_entry; + cur_valid = next_valid; + } + + if (cur_entry.block_size + tot_size >= region_size) { + break; + } + + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + } + + /* Check the last entry. */ + R_UNLESS(cur_valid, svc::ResultNotFound()); + R_UNLESS(cur_entry.phys_addr <= address, svc::ResultNotFound()); + R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, svc::ResultNotFound()); + + /* Check if the last region is valid. */ + const KProcessAddress mapped_address = (region_start + tot_size) + (address - cur_entry.phys_addr); + R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)) { + R_CONVERT_ALL(svc::ResultNotFound()); + } R_END_TRY_CATCH; + + /* We found the region. */ + *out = mapped_address; + return ResultSuccess(); + } + + Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Validate that the source address's state is valid. 
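The source must be aliasable memory currently mapped user read/write with no attributes set; its state is captured so the block manager keeps it while the permission and lock attribute change.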
*/ + KMemoryState src_state; + R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, src_address, size, KMemoryState_FlagCanAlias, KMemoryState_FlagCanAlias, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Validate that the dst address's state is valid. */ + R_TRY(this->CheckMemoryState(dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Create an update allocator for the source. */ + KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + R_TRY(src_allocator.GetResult()); + + /* Create an update allocator for the destination. */ + KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + R_TRY(dst_allocator.GetResult()); + + /* Map the memory. */ + { + /* Determine the number of pages being operated on. */ + const size_t num_pages = size / PageSize; + + /* Create page groups for the memory being unmapped. */ + KPageGroup pg(this->block_info_manager); + + /* Create the page group representing the source. */ + R_TRY(this->MakePageGroup(pg, src_address, num_pages)); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Reprotect the source as kernel-read/not mapped. */ + const KMemoryPermission new_src_perm = static_cast(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped); + const KMemoryAttribute new_src_attr = static_cast(KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked); + const KPageProperties src_properties = { new_src_perm, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null, false, src_properties, OperationType_ChangePermissions, false)); + + /* Ensure that we unprotect the source pages on failure. */ + auto unprot_guard = SCOPE_GUARD { + const KPageProperties unprotect_properties = { KMemoryPermission_UserReadWrite, false, false, false }; + MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null, false, unprotect_properties, OperationType_ChangePermissions, true)); + }; + + /* Map the alias pages. */ + const KPageProperties dst_map_properties = { KMemoryPermission_UserReadWrite, false, false, false }; + R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, false)); + + /* We successfully mapped the alias pages, so we don't need to unprotect the src pages on failure. */ + unprot_guard.Cancel(); + + /* Apply the memory block updates. */ + this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_src_perm, new_src_attr); + this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_Stack, KMemoryPermission_UserReadWrite, KMemoryAttribute_None); + } + + return ResultSuccess(); + } + + Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Validate that the source address's state is valid. 
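For unmapping, the source must still be aliasable but in the locked, kernel-read/not-mapped configuration that MapMemory left it in.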
*/ + KMemoryState src_state; + R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, src_address, size, KMemoryState_FlagCanAlias, KMemoryState_FlagCanAlias, KMemoryPermission_All, KMemoryPermission_NotMapped | KMemoryPermission_KernelRead, KMemoryAttribute_All, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked)); + + /* Validate that the dst address's state is valid. */ + KMemoryPermission dst_perm; + R_TRY(this->CheckMemoryState(nullptr, std::addressof(dst_perm), nullptr, dst_address, size, KMemoryState_All, KMemoryState_Stack, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Create an update allocator for the source. */ + KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + R_TRY(src_allocator.GetResult()); + + /* Create an update allocator for the destination. */ + KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + R_TRY(dst_allocator.GetResult()); + + /* Unmap the memory. */ + { + /* Determine the number of pages being operated on. */ + const size_t num_pages = size / PageSize; + + /* Create page groups for the memory being unmapped. */ + KPageGroup pg(this->block_info_manager); + + /* Create the page group representing the destination. */ + R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); + + /* Ensure the page group is the valid for the source. */ + R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Unmap the aliased copy of the pages. */ + const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null, false, dst_unmap_properties, OperationType_Unmap, false)); + + /* Ensure that we re-map the aliased pages on failure. */ + auto remap_guard = SCOPE_GUARD { + const KPageProperties dst_remap_properties = { dst_perm, false, false, false }; + MESOSPHERE_R_ABORT_UNLESS(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_remap_properties, true)); + }; + + /* Try to set the permissions for the source pages back to what they should be. */ + const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null, false, src_properties, OperationType_ChangePermissions, false)); + + /* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */ + remap_guard.Cancel(); + + /* Apply the memory block updates. */ + this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None); + this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None); + } + + return ResultSuccess(); + } + + Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + /* Validate the mapping request. */ + R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Verify that the source memory is normal heap. 
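That is, state Normal with user read/write permission and no attributes set; the current permission is also captured so it can be restored if mapping the aliased code region fails.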
*/ + KMemoryState src_state; + KMemoryPermission src_perm; + R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr, src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Verify that the destination memory is unmapped. */ + R_TRY(this->CheckMemoryState(dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Create an update allocator for the source. */ + KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + R_TRY(src_allocator.GetResult()); + + /* Create an update allocator for the destination. */ + KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + R_TRY(dst_allocator.GetResult()); + + /* Map the code memory. */ + { + /* Determine the number of pages being operated on. */ + const size_t num_pages = size / PageSize; + + /* Create page groups for the memory being unmapped. */ + KPageGroup pg(this->block_info_manager); + + /* Create the page group representing the source. */ + R_TRY(this->MakePageGroup(pg, src_address, num_pages)); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Reprotect the source as kernel-read/not mapped. */ + const KMemoryPermission new_perm = static_cast(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped); + const KPageProperties src_properties = { new_perm, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null, false, src_properties, OperationType_ChangePermissions, false)); + + /* Ensure that we unprotect the source pages on failure. */ + auto unprot_guard = SCOPE_GUARD { + const KPageProperties unprotect_properties = { src_perm, false, false, false }; + MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null, false, unprotect_properties, OperationType_ChangePermissions, true)); + }; + + /* Map the alias pages. */ + const KPageProperties dst_properties = { new_perm, false, false, false }; + R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false)); + + /* We successfully mapped the alias pages, so we don't need to unprotect the src pages on failure. */ + unprot_guard.Cancel(); + + /* Apply the memory block updates. */ + this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, static_cast(KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked)); + this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None); + } + + return ResultSuccess(); + } + + Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) { + /* Validate the mapping request. */ + R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Verify that the source memory is locked normal heap. 
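That is, still state Normal, but carrying the Locked attribute that MapCodeMemory applied.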
*/ + R_TRY(this->CheckMemoryState(src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, static_cast(KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked))); + + /* Verify the first page of the destination memory is aliasable code, and get its state. */ + KMemoryState dst_state; + R_TRY(this->CheckMemoryState(std::addressof(dst_state), nullptr, nullptr, dst_address, PageSize, KMemoryState_FlagCanCodeAlias, KMemoryState_FlagCanCodeAlias, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Verify that the destination memory is contiguous with the same state as the first page. */ + R_TRY(this->CheckMemoryStateContiguous(dst_address, size, KMemoryState_All, dst_state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Unmap. */ + { + /* Determine the number of pages being operated on. */ + const size_t num_pages = size / PageSize; + + /* Create page groups for the memory being unmapped. */ + KPageGroup pg(this->block_info_manager); + + /* Create the page group representing the destination. */ + R_TRY(this->MakePageGroup(pg, dst_address, num_pages)); + + /* Verify that the page group contains the same pages as the source. */ + R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion()); + + /* Create an update allocator for the source. */ + KMemoryBlockManagerUpdateAllocator src_allocator(this->memory_block_slab_manager); + R_TRY(src_allocator.GetResult()); + + /* Create an update allocator for the destination. */ + KMemoryBlockManagerUpdateAllocator dst_allocator(this->memory_block_slab_manager); + R_TRY(dst_allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Unmap the aliased copy of the pages. */ + const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null, false, dst_unmap_properties, OperationType_Unmap, false)); + + /* Ensure that we re-map the aliased pages on failure. */ + auto remap_guard = SCOPE_GUARD { + /* Cache the last address for convenience. */ + const auto last_address = dst_address + size - 1; + + /* Iterate over the memory we unmapped. */ + auto it = this->memory_block_manager.FindIterator(dst_address); + auto pg_it = pg.begin(); + KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + size_t pg_size = pg_it->GetNumPages() * PageSize; + + while (true) { + /* Get the memory info for the pages we unmapped, convert to property. */ + const KMemoryInfo info = it->GetMemoryInfo(); + const KPageProperties prev_properties = { info.GetPermission(), false, false, false }; + + /* Determine the range to map. */ + KProcessAddress map_address = std::max(info.GetAddress(), GetInteger(dst_address)); + size_t map_size = std::min(GetInteger(dst_address + size), info.GetEndAddress()) - GetInteger(map_address); + MESOSPHERE_ABORT_UNLESS(map_size != 0); + + /* While we have pages to map, map them. */ + while (map_size > 0) { + /* Check if we're at the end of the physical block. */ + if (pg_size == 0) { + /* Ensure there are more pages to map. */ + MESOSPHERE_ABORT_UNLESS(pg_it != pg.end()); + + /* Advance our physical block. */ + ++pg_it; + pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + pg_size = pg_it->GetNumPages() * PageSize; + } + + /* Map whatever we can. 
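The physical extents in the page group and the virtual ranges recorded by the block manager need not line up, so map the smaller of the two remaining sizes and advance both cursors.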
*/ + const size_t cur_size = std::min(pg_size, map_size); + MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), map_address, cur_size / PageSize, pg_phys_addr, true, prev_properties, OperationType_Map, true)); + + /* Advance. */ + map_address += cur_size; + map_size -= cur_size; + + pg_phys_addr += cur_size; + pg_size -= cur_size; + } + + /* Check if we're done. */ + if (last_address <= info.GetLastAddress()) { + /* Validate that we must have re-mapped exactly what we unmapped. */ + MESOSPHERE_ABORT_UNLESS((++pg_it) == pg.end()); + break; + } + + /* Advance. */ + ++it; + } + }; + + /* Try to set the permissions for the source pages back to what they should be. */ + const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null, false, src_properties, OperationType_ChangePermissions, false)); + + /* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */ + remap_guard.Cancel(); + + /* Apply the memory block updates. */ + this->memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None); + this->memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None); + } + + /* If the destination state was alias code, invalidate the entire instruction cache. */ + if (dst_state == KMemoryState_AliasCode) { + cpu::InvalidateEntireInstructionCache(); + } + + return ResultSuccess(); + } + KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const { KProcessAddress address = Null; @@ -479,8 +999,8 @@ namespace ams::kern { if (address == Null) { /* NOTE: Nintendo does not account for guard pages here. */ /* This may theoretically cause an offset to be chosen that cannot be mapped. */ - /* TODO: Should we account for guard pages? */ - const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages); + /* We will account for guard pages. */ + const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages - guard_pages); address = this->memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages); } } @@ -525,7 +1045,7 @@ namespace ams::kern { auto mapping_guard = SCOPE_GUARD { MESOSPHERE_ABORT_UNLESS(!reuse_ll); if (cur_address != start_address) { - const KPageProperties unmap_properties = {}; + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, start_address, (cur_address - start_address) / PageSize, Null, false, unmap_properties, OperationType_Unmap, true)); } }; @@ -688,7 +1208,35 @@ namespace ams::kern { } Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) { - MESOSPHERE_UNIMPLEMENTED(); + const size_t num_pages = size / PageSize; + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Verify we can change the memory permission. 
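The whole range must be in a reprotectable state with no attributes set; the old state and permission are captured so the update can preserve the state, change only the permission, and succeed early if nothing changes.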
*/ + KMemoryState old_state; + KMemoryPermission old_perm; + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, addr, size, KMemoryState_FlagCanReprotect, KMemoryState_FlagCanReprotect, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Determine new perm. */ + const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); + R_SUCCEED_IF(old_perm == new_perm); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Perform mapping operation. */ + const KPageProperties properties = { new_perm, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null, false, properties, OperationType_ChangePermissions, false)); + + /* Update the blocks. */ + this->memory_block_manager.Update(&allocator, addr, num_pages, old_state, new_perm, KMemoryAttribute_None); + + return ResultSuccess(); } Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) { @@ -708,8 +1256,9 @@ namespace ams::kern { /* Determine new perm/state. */ const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm); KMemoryState new_state = old_state; - const bool is_w = (new_perm & KMemoryPermission_UserWrite) == KMemoryPermission_UserWrite; - const bool is_x = (new_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute; + const bool is_w = (new_perm & KMemoryPermission_UserWrite) == KMemoryPermission_UserWrite; + const bool is_x = (new_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute; + const bool was_x = (old_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute; MESOSPHERE_ASSERT(!(is_w && is_x)); if (is_w) { @@ -725,6 +1274,9 @@ namespace ams::kern { R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages)); } + /* Succeed if there's nothing to do. */ + R_SUCCEED_IF(old_perm == new_perm && old_state == new_state); + /* Create an update allocator. */ KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); R_TRY(allocator.GetResult()); @@ -734,7 +1286,7 @@ namespace ams::kern { /* Perform mapping operation. */ const KPageProperties properties = { new_perm, false, false, false }; - const auto operation = is_x ? OperationType_ChangePermissionsAndRefresh : OperationType_ChangePermissions; + const auto operation = was_x ? OperationType_ChangePermissionsAndRefresh : OperationType_ChangePermissions; R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null, false, properties, operation, false)); /* Update the blocks. */ @@ -751,8 +1303,158 @@ namespace ams::kern { return ResultSuccess(); } + Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) { + const size_t num_pages = size / PageSize; + MESOSPHERE_ASSERT((mask | KMemoryAttribute_SetMask) == KMemoryAttribute_SetMask); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Verify we can change the memory attribute. 
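The range must allow attribute changes and must not hold any attribute outside the settable mask (DeviceShared is deliberately excluded from this test).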
*/ + KMemoryState old_state; + KMemoryPermission old_perm; + KMemoryAttribute old_attr; + constexpr u32 AttributeTestMask = ~(KMemoryAttribute_SetMask | KMemoryAttribute_DeviceShared); + R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), + addr, size, + KMemoryState_FlagCanChangeAttribute, KMemoryState_FlagCanChangeAttribute, + KMemoryPermission_None, KMemoryPermission_None, + AttributeTestMask, KMemoryAttribute_None, ~AttributeTestMask)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Determine the new attribute. */ + const KMemoryAttribute new_attr = static_cast(((old_attr & ~mask) | (attr & mask))); + + /* Perform operation. */ + const KPageProperties properties = { old_perm, false, (new_attr & KMemoryAttribute_Uncached) != 0, false }; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null, false, properties, OperationType_ChangePermissionsAndRefresh, false)); + + /* Update the blocks. */ + this->memory_block_manager.Update(&allocator, addr, num_pages, old_state, old_perm, new_attr); + + return ResultSuccess(); + } + Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) { - MESOSPHERE_UNIMPLEMENTED(); + /* Lock the physical memory mutex. */ + KScopedLightLock map_phys_mem_lk(this->map_physical_memory_lock); + + /* Try to perform a reduction in heap, instead of an extension. */ + KProcessAddress cur_address; + size_t allocation_size; + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Validate that setting heap size is possible at all. */ + R_UNLESS(!this->is_kernel, svc::ResultOutOfMemory()); + R_UNLESS(size <= static_cast(this->heap_region_end - this->heap_region_start), svc::ResultOutOfMemory()); + R_UNLESS(size <= this->max_heap_size, svc::ResultOutOfMemory()); + + if (size < static_cast(this->current_heap_end - this->heap_region_start)) { + /* The size being requested is less than the current size, so we need to free the end of the heap. */ + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Validate memory state. */ + R_TRY(this->CheckMemoryState(this->heap_region_start + size, (this->current_heap_end - this->heap_region_start) - size, + KMemoryState_All, KMemoryState_Normal, + KMemoryPermission_All, KMemoryPermission_UserReadWrite, + KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Unmap the end of the heap. */ + const size_t num_pages = ((this->current_heap_end - this->heap_region_start) - size) / PageSize; + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), this->heap_region_start + size, num_pages, Null, false, unmap_properties, OperationType_Unmap, false)); + + /* Release the memory from the resource limit. */ + GetCurrentProcess().ReleaseResource(ams::svc::LimitableResource_PhysicalMemoryMax, num_pages * PageSize); + + /* Apply the memory block update. */ + this->memory_block_manager.Update(std::addressof(allocator), this->heap_region_start + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None); + + /* Update the current heap end. 
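The heap always begins at heap_region_start, so shrinking only moves the end pointer; the address returned to the caller is unchanged.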
*/ + this->current_heap_end = this->heap_region_start + size; + + /* Set the output. */ + *out = this->heap_region_start; + return ResultSuccess(); + } else if (size == static_cast(this->current_heap_end - this->heap_region_start)) { + /* The size requested is exactly the current size. */ + *out = this->heap_region_start; + return ResultSuccess(); + } else { + /* We have to allocate memory. Determine how much to allocate and where while the table is locked. */ + cur_address = this->current_heap_end; + allocation_size = size - (this->current_heap_end - this->heap_region_start); + } + } + + /* Reserve memory for the heap extension. */ + KScopedResourceReservation memory_reservation(GetCurrentProcess().GetResourceLimit(), ams::svc::LimitableResource_PhysicalMemoryMax, allocation_size); + R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Allocate pages for the heap extension. */ + KPageGroup pg(this->block_info_manager); + R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), allocation_size / PageSize, this->allocate_option)); + + /* Open the pages in the group for the duration of the call, and close them at the end. */ + /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */ + pg.Open(); + ON_SCOPE_EXIT { pg.Close(); }; + + /* Clear all the newly allocated pages. */ + for (const auto &it : pg) { + std::memset(GetVoidPointer(it.GetAddress()), this->heap_fill_value, it.GetSize()); + } + + /* Map the pages. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Ensure that the heap hasn't changed since we began executing. */ + MESOSPHERE_ABORT_UNLESS(cur_address == this->current_heap_end); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryState(this->current_heap_end, allocation_size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Map the pages. */ + const size_t num_pages = allocation_size / PageSize; + const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), this->current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false)); + + /* We succeeded, so commit our memory reservation. */ + memory_reservation.Commit(); + + /* Apply the memory block update. */ + this->memory_block_manager.Update(std::addressof(allocator), this->current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None); + + /* Update the current heap end. */ + this->current_heap_end = this->heap_region_start + size; + + /* Set the output. */ + *out = this->heap_region_start; + return ResultSuccess(); + } } Result KPageTableBase::SetMaxHeapSize(size_t size) { @@ -790,6 +1492,82 @@ namespace ams::kern { return this->QueryInfoImpl(out_info, out_page_info, addr); } + Result KPageTableBase::QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Align the address down to page size. */ + address = util::AlignDown(GetInteger(address), PageSize); + + /* Verify that we can query the address. 
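Look up the containing block and require a state that permits physical queries with at least user-read permission; the traversal below then determines the physically contiguous extent containing the address.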
*/ + KMemoryInfo info; + ams::svc::PageInfo page_info; + R_TRY(this->QueryInfoImpl(std::addressof(info), std::addressof(page_info), address)); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryState(info, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Prepare to traverse. */ + KPhysicalAddress phys_addr; + size_t phys_size; + + KProcessAddress virt_addr = info.GetAddress(); + KProcessAddress end_addr = info.GetEndAddress(); + + /* Perform traversal. */ + { + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr); + R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory()); + + /* Set tracking variables. */ + phys_addr = next_entry.phys_addr; + phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); + + /* Iterate. */ + while (true) { + /* Continue the traversal. */ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + if (!traverse_valid) { + break; + } + + if (next_entry.phys_addr != (phys_addr + phys_size)) { + /* Check if we're done. */ + if (virt_addr <= address && address <= virt_addr + phys_size - 1) { + break; + } + + /* Advance. */ + phys_addr = next_entry.phys_addr; + virt_addr += next_entry.block_size; + phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); + } else { + phys_size += next_entry.block_size; + } + + /* Check if we're done. */ + if (end_addr < virt_addr + phys_size) { + break; + } + } + MESOSPHERE_ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1); + + /* Ensure we use the right size. */ + if (end_addr < virt_addr + phys_size) { + phys_size = end_addr - virt_addr; + } + } + + /* Set the output. */ + out->physical_address = GetInteger(phys_addr); + out->virtual_address = GetInteger(virt_addr); + out->size = phys_size; + return ResultSuccess(); + } + Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); MESOSPHERE_ASSERT(util::IsAligned(size, PageSize)); @@ -869,11 +1647,85 @@ namespace ams::kern { } Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) { - MESOSPHERE_UNIMPLEMENTED(); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); + MESOSPHERE_ASSERT(util::IsAligned(size, PageSize)); + MESOSPHERE_ASSERT(size > 0); + R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress()); + const size_t num_pages = size / PageSize; + const KPhysicalAddress last = phys_addr + size - 1; + + /* Get region extents. */ + const KProcessAddress region_start = this->GetRegionAddress(KMemoryState_Static); + const size_t region_size = this->GetRegionSize(KMemoryState_Static); + const size_t region_num_pages = region_size / PageSize; + + /* Locate the memory region. */ + auto region_it = KMemoryLayout::FindContainingRegion(phys_addr); + const auto end_it = KMemoryLayout::GetEnd(phys_addr); + R_UNLESS(region_it != end_it, svc::ResultInvalidAddress()); + + MESOSPHERE_ASSERT(region_it->Contains(GetInteger(phys_addr))); + R_UNLESS(GetInteger(last) <= region_it->GetLastAddress(), svc::ResultInvalidAddress()); + + /* Check the region attributes. 
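Static mappings must target DRAM regions that are user-mappable, and regions marked read-only for userland may not be mapped read/write.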
*/ + const bool is_rw = perm == KMemoryPermission_UserReadWrite; + R_UNLESS( region_it->IsDerivedFrom(KMemoryRegionType_Dram), svc::ResultInvalidAddress()); + R_UNLESS(!region_it->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), svc::ResultInvalidAddress()); + R_UNLESS(!region_it->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Select an address to map at. */ + KProcessAddress addr = Null; + const size_t phys_alignment = std::min(std::min(GetInteger(phys_addr) & -GetInteger(phys_addr), size & -size), MaxPhysicalMapAlignment); + for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) { + const size_t alignment = KPageTable::GetBlockSize(static_cast(block_type)); + if (alignment > phys_alignment) { + continue; + } + + addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages()); + if (addr != Null) { + break; + } + } + R_UNLESS(addr != Null, svc::ResultOutOfMemory()); + + /* Check that we can map static here. */ + MESOSPHERE_ASSERT(this->CanContain(addr, size, KMemoryState_Static)); + MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Perform mapping operation. */ + const KPageProperties properties = { perm, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false)); + + /* Update the blocks. */ + this->memory_block_manager.Update(&allocator, addr, num_pages, KMemoryState_Static, perm, KMemoryAttribute_None); + + /* We successfully mapped the pages. */ + return ResultSuccess(); } Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) { - MESOSPHERE_UNIMPLEMENTED(); + /* Get the memory region. */ + auto &tree = KMemoryLayout::GetPhysicalMemoryRegionTree(); + auto it = tree.TryFindFirstDerivedRegion(region_type); + R_UNLESS(it != tree.end(), svc::ResultOutOfRange()); + + /* Map the region. */ + R_TRY_CATCH(this->MapStatic(it->GetAddress(), it->GetSize(), perm)) { + R_CONVERT(svc::ResultInvalidAddress, svc::ResultOutOfRange()) + } R_END_TRY_CATCH; + + return ResultSuccess(); } Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { @@ -909,15 +1761,67 @@ namespace ams::kern { } /* Update the blocks. */ - this->memory_block_manager.Update(&allocator, addr, num_pages, state, perm, KMemoryAttribute_None); + this->memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None); /* We successfully mapped the pages. */ *out_addr = addr; return ResultSuccess(); } + Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) { + /* Check that the map is in range. */ + const size_t size = num_pages * PageSize; + R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. 
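In the address-selection loop above, `x & -x` isolates the lowest set bit of `x`, which is the largest power-of-two alignment that the physical address (and, separately, the size) naturally satisfies; block types are then tried from largest to smallest and the first one whose alignment fits is used. A small sketch of that selection, with hypothetical block sizes standing in for KPageTable's block types:

```cpp
#include <cstdint>
#include <cstdio>
#include <algorithm>
#include <iterator>

// Largest power-of-two alignment that 'value' naturally satisfies (its lowest set bit).
constexpr uint64_t NaturalAlignment(uint64_t value) {
    return value & (0 - value);
}

// Hypothetical block sizes, largest first (e.g. 1GiB, 2MiB, 64KiB, 4KiB).
constexpr uint64_t BlockSizes[] = { 1ull << 30, 1ull << 21, 1ull << 16, 1ull << 12 };

uint64_t SelectMappingAlignment(uint64_t phys_addr, uint64_t size) {
    const uint64_t phys_alignment = std::min(NaturalAlignment(phys_addr), NaturalAlignment(size));
    for (uint64_t block_size : BlockSizes) {
        if (block_size <= phys_alignment) {
            return block_size;  // Largest block size compatible with the mapping.
        }
    }
    return BlockSizes[std::size(BlockSizes) - 1];
}

int main() {
    // 0x80200000 is 2MiB-aligned and the size is 4MiB, so 2MiB blocks are usable.
    std::printf("alignment=%#llx\n", (unsigned long long)SelectMappingAlignment(0x80200000ull, 0x400000ull));
}
```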
*/ + KScopedLightLock lk(this->general_lock); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryState(address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Map the pages. */ + const KPageProperties properties = { perm, false, false, false }; + R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, properties)); + + /* Update the blocks. */ + this->memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None); + + return ResultSuccess(); + } + Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) { - MESOSPHERE_UNIMPLEMENTED(); + /* Check that the unmap is in range. */ + const size_t size = num_pages * PageSize; + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryState(address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Perform the unmap. */ + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null, false, unmap_properties, OperationType_Unmap, false)); + + /* Update the blocks. */ + this->memory_block_manager.Update(&allocator, address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None); + + return ResultSuccess(); } Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) { @@ -1024,7 +1928,7 @@ namespace ams::kern { Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) { /* Ensure that the page group isn't null. */ - AMS_ASSERT(out != nullptr); + MESOSPHERE_ASSERT(out != nullptr); /* Make sure that the region we're mapping is valid for the table. */ const size_t size = num_pages * PageSize; @@ -1045,4 +1949,2024 @@ namespace ams::kern { return ResultSuccess(); } + Result KPageTableBase::MakeAndOpenPageGroupContiguous(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) { + /* Ensure that the page group isn't null. */ + MESOSPHERE_ASSERT(out != nullptr); + + /* Make sure that the region we're mapping is valid for the table. */ + const size_t size = num_pages * PageSize; + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check if state allows us to create the group. 
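The R_UNLESS and R_TRY constructs used throughout these functions implement early returns on failure: R_UNLESS returns the given result unless its condition holds, and R_TRY propagates a failing result from a sub-call. A simplified standalone approximation of that control flow (the real macros operate on the kernel's Result type and carry more bookkeeping):

```cpp
#include <cstdio>
#include <cstddef>
#include <cstdint>

// Minimal stand-in for a result code: 0 is success, anything else is failure.
using Result = int;
constexpr Result ResultSuccess        = 0;
constexpr Result ResultInvalidAddress = 101;
constexpr Result ResultOutOfMemory    = 102;

#define R_UNLESS(cond, result) do { if (!(cond)) { return (result); } } while (0)
#define R_TRY(expr)            do { const Result r_try_ = (expr); if (r_try_ != ResultSuccess) { return r_try_; } } while (0)

Result AllocatePages(size_t num_pages) {
    R_UNLESS(num_pages <= 64, ResultOutOfMemory);
    return ResultSuccess;
}

Result MapExample(uintptr_t address, size_t num_pages) {
    /* Validate the arguments, returning early on failure. */
    R_UNLESS((address % 0x1000) == 0, ResultInvalidAddress);

    /* Propagate failures from sub-operations. */
    R_TRY(AllocatePages(num_pages));

    return ResultSuccess;
}

int main() {
    std::printf("%d\n", MapExample(0x1000, 16));   // 0 (success)
    std::printf("%d\n", MapExample(0x1001, 16));   // 101
    std::printf("%d\n", MapExample(0x1000, 128));  // 102
}
```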
*/ + R_TRY(this->CheckMemoryStateContiguous(address, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr)); + + /* Create a new page group for the region. */ + R_TRY(this->MakePageGroup(*out, address, num_pages)); + + /* Open a new reference to the pages in the group. */ + out->Open(); + + return ResultSuccess(); + } + + Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) { + /* Check that the region is in range. */ + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_Uncached, KMemoryAttribute_None)); + + /* Get the impl. */ + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address); + R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory()); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + /* Iterate. */ + while (tot_size < size) { + /* Continue the traversal. */ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory()); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + /* Check that the pages are linearly mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Invalidate the block. */ + if (cur_size > 0) { + /* NOTE: Nintendo does not check the result of invalidation. */ + cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size); + } + + /* Advance. */ + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + /* Ensure we use the right size for the last block. */ + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + /* Check that the last block is linearly mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Invalidate the last block. */ + if (cur_size > 0) { + /* NOTE: Nintendo does not check the result of invalidation. */ + cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size); + } + + return ResultSuccess(); + } + + Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) { + /* Lightly validate the region is in range. */ + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Require that the memory either be user readable or debuggable. 
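The cache-invalidation loop above (and the several copy routines that follow) all share the same bookkeeping: the first block's usable size is its block size minus the offset of the starting address within it, physically contiguous blocks are merged into `cur_size`, and after the loop the final run is trimmed by `tot_size - size` so that exactly the requested bytes are touched. A worked standalone sketch of that bookkeeping over hypothetical block descriptors:

```cpp
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

struct Entry {
    uint64_t phys_addr;   // Physical address at the current position of the walk.
    uint64_t block_size;  // Size of the containing block (power of two).
};

// Split a walk of translation entries into maximal physically contiguous ranges
// covering exactly 'size' bytes, starting partway into the first block.
// Assumes the entries cover at least 'size' bytes.
std::vector<std::pair<uint64_t, uint64_t>> CollectRanges(const std::vector<Entry> &entries, uint64_t size) {
    std::vector<std::pair<uint64_t, uint64_t>> ranges;

    // The first block may be entered at an offset: only the tail of it is usable.
    uint64_t cur_addr = entries[0].phys_addr;
    uint64_t cur_size = entries[0].block_size - (cur_addr & (entries[0].block_size - 1));
    uint64_t tot_size = cur_size;

    for (size_t i = 1; tot_size < size; ++i) {
        const Entry &next = entries[i];
        if (next.phys_addr != cur_addr + cur_size) {
            ranges.emplace_back(cur_addr, cur_size);   // Discontiguous: emit the finished run.
            cur_addr = next.phys_addr;
            cur_size = next.block_size;
        } else {
            cur_size += next.block_size;               // Contiguous: merge into the current run.
        }
        tot_size += next.block_size;
    }

    // Trim the last run so the total covers exactly 'size' bytes.
    if (tot_size > size) {
        cur_size -= tot_size - size;
    }
    ranges.emplace_back(cur_addr, cur_size);
    return ranges;
}

int main() {
    // Start 0x800 bytes into a 4KiB block, then one adjacent and one distant block.
    std::vector<Entry> entries = { { 0x80000800, 0x1000 }, { 0x80001000, 0x1000 }, { 0x90000000, 0x1000 } };
    for (const auto &[addr, len] : CollectRanges(entries, 0x2000)) {
        std::printf("range %#llx +%#llx\n", (unsigned long long)addr, (unsigned long long)len);
    }
}
```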
*/ + const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)); + if (!can_read) { + const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory()); + } + + /* Get the impl. */ + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address); + R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory()); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result { + /* Ensure the address is linear mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy as much aligned data as we can. */ + if (cur_size >= sizeof(u32)) { + const size_t copy_size = util::AlignDown(cur_size, sizeof(u32)); + R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(buffer, GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size), svc::ResultInvalidPointer()); + buffer = reinterpret_cast(reinterpret_cast(buffer) + copy_size); + cur_addr += copy_size; + cur_size -= copy_size; + } + + /* Copy remaining data. */ + if (cur_size > 0) { + R_UNLESS(UserspaceAccess::CopyMemoryToUser(buffer, GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidPointer()); + } + + return ResultSuccess(); + }; + + /* Iterate. */ + while (tot_size < size) { + /* Continue the traversal. */ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + /* Perform copy. */ + R_TRY(PerformCopy()); + + /* Advance. */ + buffer = reinterpret_cast(reinterpret_cast(buffer) + cur_size); + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + /* Ensure we use the right size for the last block. */ + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + /* Perform copy for the last block. */ + R_TRY(PerformCopy()); + + return ResultSuccess(); + } + + Result KPageTableBase::WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) { + /* Lightly validate the region is in range. */ + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Require that the memory either be user writable or debuggable. 
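The PerformCopy helper above copies as much of each run as possible with the 32-bit-aligned userspace copy, then falls back to a plain copy for the remaining bytes. A standalone sketch of that split, with ordinary memcpy standing in for the kernel's UserspaceAccess routines (which additionally catch faults on the user side):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// Hypothetical stand-ins for fault-safe user copies; here they are ordinary memcpy.
static bool CopyToUserAligned32Bit(void *dst, const void *src, size_t size) { std::memcpy(dst, src, size); return true; }
static bool CopyToUser(void *dst, const void *src, size_t size)             { std::memcpy(dst, src, size); return true; }

constexpr size_t AlignDown(size_t value, size_t align) { return value & ~(align - 1); }

bool CopyRunToUser(void *dst, const void *src, size_t size) {
    const uint8_t *cur_src = static_cast<const uint8_t *>(src);
    uint8_t *cur_dst       = static_cast<uint8_t *>(dst);

    /* Copy as much 4-byte-aligned data as we can in one go. */
    if (size >= sizeof(uint32_t)) {
        const size_t copy_size = AlignDown(size, sizeof(uint32_t));
        if (!CopyToUserAligned32Bit(cur_dst, cur_src, copy_size)) { return false; }
        cur_src += copy_size;
        cur_dst += copy_size;
        size    -= copy_size;
    }

    /* Copy any remaining bytes. */
    if (size > 0) {
        if (!CopyToUser(cur_dst, cur_src, size)) { return false; }
    }
    return true;
}

int main() {
    uint8_t src[11] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
    uint8_t dst[11] = {};
    CopyRunToUser(dst, src, sizeof(src));
    std::printf("%u %u\n", dst[0], dst[10]);
}
```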
*/ + const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None)); + if (!can_read) { + const bool can_debug = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + R_UNLESS(can_debug, svc::ResultInvalidCurrentMemory()); + } + + /* Get the impl. */ + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address); + R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory()); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result { + /* Ensure the address is linear mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy as much aligned data as we can. */ + if (cur_size >= sizeof(u32)) { + const size_t copy_size = util::AlignDown(cur_size, sizeof(u32)); + R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, copy_size), svc::ResultInvalidCurrentMemory()); + buffer = reinterpret_cast(reinterpret_cast(buffer) + copy_size); + cur_addr += copy_size; + cur_size -= copy_size; + } + + /* Copy remaining data. */ + if (cur_size > 0) { + R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, cur_size), svc::ResultInvalidCurrentMemory()); + } + + return ResultSuccess(); + }; + + /* Iterate. */ + while (tot_size < size) { + /* Continue the traversal. */ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + /* Perform copy. */ + R_TRY(PerformCopy()); + + /* Advance. */ + buffer = reinterpret_cast(reinterpret_cast(buffer) + cur_size); + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + /* Ensure we use the right size for the last block. */ + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + /* Perform copy for the last block. */ + R_TRY(PerformCopy()); + + return ResultSuccess(); + } + + Result KPageTableBase::LockForDeviceAddressSpace(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) { + /* Lightly validate the range before doing anything else. */ + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check the memory state. */ + const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap); + R_TRY(this->CheckMemoryState(address, size, test_state, test_state, perm, perm, KMemoryAttribute_AnyLocked | KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, 0, KMemoryAttribute_DeviceShared)); + + /* Make the page group, if we should. 
*/ + if (out != nullptr) { + R_TRY(this->MakePageGroup(*out, address, num_pages)); + } + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* Update the memory blocks. */ + this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None); + + /* Open the page group. */ + if (out != nullptr) { + out->Open(); + } + + return ResultSuccess(); + } + + Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) { + /* Lightly validate the range before doing anything else. */ + const size_t num_pages = size / PageSize; + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryStateContiguous(address, size, + KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap, + KMemoryPermission_None, KMemoryPermission_None, + KMemoryAttribute_AnyLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* Update the memory blocks. */ + this->memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission_None); + + return ResultSuccess(); + } + + Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) { + return this->LockMemoryAndOpen(nullptr, out, address, size, + KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer, + KMemoryPermission_All, KMemoryPermission_UserReadWrite, + KMemoryAttribute_All, KMemoryAttribute_None, + static_cast(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite), + KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked); + } + + Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) { + return this->UnlockMemory(address, size, + KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer, + KMemoryPermission_None, KMemoryPermission_None, + KMemoryAttribute_All, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, + KMemoryPermission_UserReadWrite, + KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, nullptr); + } + + Result KPageTableBase::LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) { + return this->LockMemoryAndOpen(out, nullptr, address, size, + KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer, + KMemoryPermission_All, KMemoryPermission_UserReadWrite, + KMemoryAttribute_All, KMemoryAttribute_None, + perm, + KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked); + } + + Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) { + return this->UnlockMemory(address, size, + KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer, + KMemoryPermission_None, KMemoryPermission_None, + KMemoryAttribute_All, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, + KMemoryPermission_UserReadWrite, + KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, std::addressof(pg)); + } + + Result KPageTableBase::LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) { + return this->LockMemoryAndOpen(out, nullptr, address, size, + 
KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory, + KMemoryPermission_All, KMemoryPermission_UserReadWrite, + KMemoryAttribute_All, KMemoryAttribute_None, + static_cast(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite), + KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked); + } + + Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) { + return this->UnlockMemory(address, size, + KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory, + KMemoryPermission_None, KMemoryPermission_None, + KMemoryAttribute_All, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, + KMemoryPermission_UserReadWrite, + KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, std::addressof(pg)); + } + + Result KPageTableBase::CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + /* Lightly validate the range before doing anything else. */ + R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory()); + + /* Copy the memory. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check memory state. */ + R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr)); + + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result { + /* Ensure the address is linear mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy as much aligned data as we can. */ + if (cur_size >= sizeof(u32)) { + const size_t copy_size = util::AlignDown(cur_size, sizeof(u32)); + R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size), svc::ResultInvalidCurrentMemory()); + dst_addr += copy_size; + cur_addr += copy_size; + cur_size -= copy_size; + } + + /* Copy remaining data. */ + if (cur_size > 0) { + R_UNLESS(UserspaceAccess::CopyMemoryToUser(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidCurrentMemory()); + } + + return ResultSuccess(); + }; + + /* Iterate. */ + while (tot_size < size) { + /* Continue the traversal. */ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + /* Perform copy. */ + R_TRY(PerformCopy()); + + /* Advance. */ + dst_addr += cur_size; + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + /* Ensure we use the right size for the last block. */ + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + /* Perform copy for the last block. 
*/ + R_TRY(PerformCopy()); + } + + return ResultSuccess(); + } + + Result KPageTableBase::CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + /* Lightly validate the range before doing anything else. */ + R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory()); + + /* Copy the memory. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check memory state. */ + R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr)); + + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result { + /* Ensure the address is linear mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy the data. */ + std::memcpy(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size); + + return ResultSuccess(); + }; + + /* Iterate. */ + while (tot_size < size) { + /* Continue the traversal. */ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + /* Perform copy. */ + R_TRY(PerformCopy()); + + /* Advance. */ + dst_addr += cur_size; + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + /* Ensure we use the right size for the last block. */ + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + /* Perform copy for the last block. */ + R_TRY(PerformCopy()); + } + + return ResultSuccess(); + } + + Result KPageTableBase::CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) { + /* Lightly validate the range before doing anything else. */ + R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory()); + + /* Copy the memory. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check memory state. */ + R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr)); + + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + + /* Prepare tracking variables. 
*/ + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result { + /* Ensure the address is linear mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy as much aligned data as we can. */ + if (cur_size >= sizeof(u32)) { + const size_t copy_size = util::AlignDown(cur_size, sizeof(u32)); + R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), copy_size), svc::ResultInvalidCurrentMemory()); + src_addr += copy_size; + cur_addr += copy_size; + cur_size -= copy_size; + } + + /* Copy remaining data. */ + if (cur_size > 0) { + R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size), svc::ResultInvalidCurrentMemory()); + } + + return ResultSuccess(); + }; + + /* Iterate. */ + while (tot_size < size) { + /* Continue the traversal. */ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + /* Perform copy. */ + R_TRY(PerformCopy()); + + /* Advance. */ + src_addr += cur_size; + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + /* Ensure we use the right size for the last block. */ + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + /* Perform copy for the last block. */ + R_TRY(PerformCopy()); + } + + return ResultSuccess(); + } + + Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) { + /* Lightly validate the range before doing anything else. */ + R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory()); + + /* Copy the memory. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check memory state. */ + R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr)); + + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_addr = next_entry.phys_addr; + size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1)); + size_t tot_size = cur_size; + + auto PerformCopy = [&] ALWAYS_INLINE_LAMBDA () -> Result { + /* Ensure the address is linear mapped. */ + R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy the data. */ + std::memcpy(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size); + + return ResultSuccess(); + }; + + /* Iterate. */ + while (tot_size < size) { + /* Continue the traversal. 
*/ + traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + if (next_entry.phys_addr != (cur_addr + cur_size)) { + /* Perform copy. */ + R_TRY(PerformCopy()); + + /* Advance. */ + src_addr += cur_size; + + cur_addr = next_entry.phys_addr; + cur_size = next_entry.block_size; + } else { + cur_size += next_entry.block_size; + } + + tot_size += next_entry.block_size; + } + + /* Ensure we use the right size for the last block. */ + if (tot_size > size) { + cur_size -= (tot_size - size); + } + + /* Perform copy for the last block. */ + R_TRY(PerformCopy()); + } + + return ResultSuccess(); + } + + Result KPageTableBase::CopyMemoryFromHeapToHeap(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + /* For convenience, alias this. */ + KPageTableBase &src_page_table = *this; + + /* Lightly validate the ranges before doing anything else. */ + R_UNLESS(src_page_table.Contains(src_addr, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_page_table.Contains(dst_addr, size), svc::ResultInvalidCurrentMemory()); + + /* Copy the memory. */ + { + /* Get the table locks. */ + KLightLock &lock_0 = (reinterpret_cast(std::addressof(src_page_table)) <= reinterpret_cast(std::addressof(dst_page_table))) ? src_page_table.general_lock : dst_page_table.general_lock; + KLightLock &lock_1 = (reinterpret_cast(std::addressof(src_page_table)) <= reinterpret_cast(std::addressof(dst_page_table))) ? dst_page_table.general_lock : src_page_table.general_lock; + + /* Lock the first lock. */ + KScopedLightLock lk0(lock_0); + + /* If necessary, lock the second lock. */ + std::optional lk1; + if (std::addressof(lock_0) != std::addressof(lock_1)) { + lk1.emplace(lock_1); + } + + /* Check memory state. */ + R_TRY(src_page_table.CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr)); + R_TRY(dst_page_table.CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr)); + + /* Get implementations. */ + auto &src_impl = src_page_table.GetImpl(); + auto &dst_impl = dst_page_table.GetImpl(); + + /* Prepare for traversal. */ + TraversalContext src_context; + TraversalContext dst_context; + TraversalEntry src_next_entry; + TraversalEntry dst_next_entry; + bool traverse_valid; + + /* Begin traversal. */ + traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), std::addressof(src_context), src_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), std::addressof(dst_context), dst_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; + KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; + size_t cur_src_size = src_next_entry.block_size - (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); + size_t cur_dst_size = dst_next_entry.block_size - (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); + + /* Adjust the initial block sizes. 
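Whenever two page tables are involved, the code always acquires the lock of the table at the lower address first, and only takes the second lock if the tables differ. That gives a single global lock order, so two threads copying in opposite directions cannot deadlock against each other. A minimal sketch of the same discipline with std::mutex:

```cpp
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <optional>

struct Table {
    std::mutex lock;
};

// Copy between two tables, always locking the lower-addressed table first.
void CopyBetween(Table &src, Table &dst) {
    std::mutex &lock_0 = (reinterpret_cast<uintptr_t>(&src) <= reinterpret_cast<uintptr_t>(&dst)) ? src.lock : dst.lock;
    std::mutex &lock_1 = (reinterpret_cast<uintptr_t>(&src) <= reinterpret_cast<uintptr_t>(&dst)) ? dst.lock : src.lock;

    std::lock_guard<std::mutex> lk0(lock_0);

    // Only take the second lock if it is a different table (a self-copy would deadlock otherwise).
    std::optional<std::lock_guard<std::mutex>> lk1;
    if (&lock_0 != &lock_1) {
        lk1.emplace(lock_1);
    }

    // ... perform the copy while both tables are locked ...
    std::printf("copying with consistent lock order\n");
}

int main() {
    Table a, b;
    CopyBetween(a, b);
    CopyBetween(b, a);  // Same lock order as above, so no ABBA deadlock.
    CopyBetween(a, a);  // Self-copy only locks once.
}
```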
*/ + src_next_entry.block_size = cur_src_size; + dst_next_entry.block_size = cur_dst_size; + + /* Before we get any crazier, succeed if there's nothing to do. */ + R_SUCCEED_IF(size == 0); + + /* We're going to manage dual traversal via an offset against the total size. */ + KPhysicalAddress cur_src_addr = cur_src_block_addr; + KPhysicalAddress cur_dst_addr = cur_dst_block_addr; + size_t cur_min_size = std::min(cur_src_size, cur_dst_size); + + /* Iterate. */ + size_t ofs = 0; + while (ofs < size) { + /* Determine how much we can copy this iteration. */ + const size_t cur_copy_size = std::min(cur_min_size, size - ofs); + + /* If we need to advance the traversals, do so. */ + bool updated_src = false, updated_dst = false, skip_copy = false; + if (ofs + cur_copy_size != size) { + if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) { + /* Continue the src traversal. */ + traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), std::addressof(src_context)); + MESOSPHERE_ASSERT(traverse_valid); + + /* Update source. */ + updated_src = cur_src_addr + cur_min_size != GetInteger(src_next_entry.phys_addr); + } + + if (cur_dst_addr + cur_min_size == dst_next_entry.phys_addr + dst_next_entry.block_size) { + /* Continue the dst traversal. */ + traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), std::addressof(dst_context)); + MESOSPHERE_ASSERT(traverse_valid); + + /* Update destination. */ + updated_dst = cur_dst_addr + cur_min_size != GetInteger(dst_next_entry.phys_addr); + } + + /* If we didn't update either of source/destination, skip the copy this iteration. */ + if (!updated_src && !updated_dst) { + skip_copy = true; + + /* Update the source block address. */ + cur_src_block_addr = src_next_entry.phys_addr; + } + } + + /* Do the copy, unless we're skipping it. */ + if (!skip_copy) { + /* We need both ends of the copy to be heap blocks. */ + R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), svc::ResultInvalidCurrentMemory()); + R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy the data. */ + std::memcpy(GetVoidPointer(GetHeapVirtualAddress(cur_dst_addr)), GetVoidPointer(GetHeapVirtualAddress(cur_src_addr)), cur_copy_size); + + /* Update. */ + cur_src_block_addr = src_next_entry.phys_addr; + cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size; + cur_dst_block_addr = dst_next_entry.phys_addr; + cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size; + + /* Advance offset. */ + ofs += cur_copy_size; + } + + /* Update min size. */ + cur_src_size = src_next_entry.block_size; + cur_dst_size = dst_next_entry.block_size; + cur_min_size = std::min(cur_src_block_addr - cur_src_addr + cur_src_size, cur_dst_block_addr - cur_dst_addr + cur_dst_size); + } + } + + return ResultSuccess(); + } + + Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) { + /* For convenience, alias this. */ + KPageTableBase &src_page_table = *this; + + /* Lightly validate the ranges before doing anything else. 
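The dual-traversal loop above copies in chunks bounded by whichever of the two current blocks ends first, so source and destination block boundaries never need to line up. The kernel version additionally defers traversal advancement and can skip a copy while resynchronizing; the sketch below only shows the underlying chunking idea over hypothetical in-memory pieces:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical contiguous pieces backing a virtually contiguous range.
struct Piece {
    uint8_t *data;
    size_t   size;
};

// Copy 'size' bytes from one list of pieces to another; each chunk is bounded by
// whichever of the two current pieces ends first.
void DualTraversalCopy(const std::vector<Piece> &src, const std::vector<Piece> &dst, size_t size) {
    size_t si = 0, di = 0;       // Current piece indices.
    size_t soff = 0, doff = 0;   // Offsets within the current pieces.
    size_t ofs = 0;

    while (ofs < size) {
        const size_t src_left = src[si].size - soff;
        const size_t dst_left = dst[di].size - doff;
        const size_t chunk    = std::min({ src_left, dst_left, size - ofs });

        std::memcpy(dst[di].data + doff, src[si].data + soff, chunk);

        soff += chunk; doff += chunk; ofs += chunk;
        if (soff == src[si].size) { ++si; soff = 0; }   // Advance the source traversal.
        if (doff == dst[di].size) { ++di; doff = 0; }   // Advance the destination traversal.
    }
}

int main() {
    uint8_t a[6] = { 1, 2, 3, 4, 5, 6 }, b[6] = {}, c[6] = {};
    // Source split 4+2, destination split 2+4.
    DualTraversalCopy({ { a, 4 }, { a + 4, 2 } }, { { b, 2 }, { c, 4 } }, 6);
    std::printf("%u %u %u %u\n", b[0], b[1], c[0], c[3]);
}
```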
*/ + R_UNLESS(src_page_table.Contains(src_addr, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_page_table.Contains(dst_addr, size), svc::ResultInvalidCurrentMemory()); + + /* Copy the memory. */ + { + /* Get the table locks. */ + KLightLock &lock_0 = (reinterpret_cast(std::addressof(src_page_table)) <= reinterpret_cast(std::addressof(dst_page_table))) ? src_page_table.general_lock : dst_page_table.general_lock; + KLightLock &lock_1 = (reinterpret_cast(std::addressof(src_page_table)) <= reinterpret_cast(std::addressof(dst_page_table))) ? dst_page_table.general_lock : src_page_table.general_lock; + + /* Lock the first lock. */ + KScopedLightLock lk0(lock_0); + + /* If necessary, lock the second lock. */ + std::optional lk1; + if (std::addressof(lock_0) != std::addressof(lock_1)) { + lk1.emplace(lock_1); + } + + /* Check memory state. */ + R_TRY(src_page_table.CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr)); + + /* Get implementations. */ + auto &src_impl = src_page_table.GetImpl(); + auto &dst_impl = dst_page_table.GetImpl(); + + /* Prepare for traversal. */ + TraversalContext src_context; + TraversalContext dst_context; + TraversalEntry src_next_entry; + TraversalEntry dst_next_entry; + bool traverse_valid; + + /* Begin traversal. */ + traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), std::addressof(src_context), src_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), std::addressof(dst_context), dst_addr); + MESOSPHERE_ABORT_UNLESS(traverse_valid); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr; + KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr; + size_t cur_src_size = src_next_entry.block_size - (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1)); + size_t cur_dst_size = dst_next_entry.block_size - (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1)); + + /* Adjust the initial block sizes. */ + src_next_entry.block_size = cur_src_size; + dst_next_entry.block_size = cur_dst_size; + + /* Before we get any crazier, succeed if there's nothing to do. */ + R_SUCCEED_IF(size == 0); + + /* We're going to manage dual traversal via an offset against the total size. */ + KPhysicalAddress cur_src_addr = cur_src_block_addr; + KPhysicalAddress cur_dst_addr = cur_dst_block_addr; + size_t cur_min_size = std::min(cur_src_size, cur_dst_size); + + /* Iterate. */ + size_t ofs = 0; + while (ofs < size) { + /* Determine how much we can copy this iteration. */ + const size_t cur_copy_size = std::min(cur_min_size, size - ofs); + + /* If we need to advance the traversals, do so. */ + bool updated_src = false, updated_dst = false, skip_copy = false; + if (ofs + cur_copy_size != size) { + if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) { + /* Continue the src traversal. */ + traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), std::addressof(src_context)); + MESOSPHERE_ASSERT(traverse_valid); + + /* Update source. */ + updated_src = cur_src_addr + cur_min_size != GetInteger(src_next_entry.phys_addr); + } + + if (cur_dst_addr + cur_min_size == dst_next_entry.phys_addr + dst_next_entry.block_size) { + /* Continue the dst traversal. 
*/ + traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), std::addressof(dst_context)); + MESOSPHERE_ASSERT(traverse_valid); + + /* Update destination. */ + updated_dst = cur_dst_addr + cur_min_size != GetInteger(dst_next_entry.phys_addr); + } + + /* If we didn't update either of source/destination, skip the copy this iteration. */ + if (!updated_src && !updated_dst) { + skip_copy = true; + + /* Update the source block address. */ + cur_src_block_addr = src_next_entry.phys_addr; + } + } + + /* Do the copy, unless we're skipping it. */ + if (!skip_copy) { + /* We need both ends of the copy to be heap blocks. */ + R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), svc::ResultInvalidCurrentMemory()); + R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), svc::ResultInvalidCurrentMemory()); + + /* Copy the data. */ + std::memcpy(GetVoidPointer(GetHeapVirtualAddress(cur_dst_addr)), GetVoidPointer(GetHeapVirtualAddress(cur_src_addr)), cur_copy_size); + + /* Update. */ + cur_src_block_addr = src_next_entry.phys_addr; + cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size; + cur_dst_block_addr = dst_next_entry.phys_addr; + cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size; + + /* Advance offset. */ + ofs += cur_copy_size; + } + + /* Update min size. */ + cur_src_size = src_next_entry.block_size; + cur_dst_size = dst_next_entry.block_size; + cur_min_size = std::min(cur_src_block_addr - cur_src_addr + cur_src_size, cur_dst_block_addr - cur_dst_addr + cur_dst_size); + } + } + + return ResultSuccess(); + } + + Result KPageTableBase::SetupForIpcClient(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission test_perm, KMemoryState dst_state) { + /* Validate pre-conditions. */ + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(test_perm == KMemoryPermission_UserReadWrite || test_perm == KMemoryPermission_UserRead); + + /* Check that the address is in range. */ + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Get the source permission. */ + const auto src_perm = static_cast((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead); + + /* Get aligned extents. */ + const KProcessAddress aligned_src_start = util::AlignDown(GetInteger(address), PageSize); + const KProcessAddress aligned_src_end = util::AlignUp(GetInteger(address) + size, PageSize); + const KProcessAddress mapping_src_start = util::AlignUp(GetInteger(address), PageSize); + const KProcessAddress mapping_src_end = util::AlignDown(GetInteger(address) + size, PageSize); + + const auto aligned_src_last = GetInteger(aligned_src_end) - 1; + const auto mapping_src_last = GetInteger(mapping_src_end) - 1; + + /* Get the test state and attribute mask. 
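The two pairs of extents computed above split an arbitrary client buffer into the page-aligned region that encloses it (aligned_*) and the whole pages fully contained in it (mapping_*); the difference is at most one partial page at each end, which is what the server-side setup later backs with freshly allocated pages. A small worked example of the arithmetic:

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint64_t PageSize = 0x1000;

constexpr uint64_t AlignDown(uint64_t value, uint64_t align) { return value & ~(align - 1); }
constexpr uint64_t AlignUp(uint64_t value, uint64_t align)   { return AlignDown(value + align - 1, align); }

int main() {
    // A buffer that starts and ends in the middle of a page.
    const uint64_t address = 0x10000800;
    const uint64_t size    = 0x2300;

    const uint64_t aligned_start = AlignDown(address, PageSize);        // 0x10000000
    const uint64_t aligned_end   = AlignUp(address + size, PageSize);   // 0x10003000
    const uint64_t mapping_start = AlignUp(address, PageSize);          // 0x10001000
    const uint64_t mapping_end   = AlignDown(address + size, PageSize); // 0x10002000

    // Whole pages inside the buffer can be aliased directly; the rest needs partial pages.
    std::printf("enclosing region: %#llx..%#llx\n", (unsigned long long)aligned_start, (unsigned long long)aligned_end);
    std::printf("fully contained:  %#llx..%#llx\n", (unsigned long long)mapping_start, (unsigned long long)mapping_end);
    std::printf("partial bytes:    %#llx\n", (unsigned long long)((aligned_end - aligned_start) - (mapping_end - mapping_start)));
}
```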
*/ + u32 test_state; + u32 test_attr_mask; + switch (dst_state) { + case KMemoryState_Ipc: + test_state = KMemoryState_FlagCanUseIpc; + test_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked; + break; + case KMemoryState_NonSecureIpc: + test_state = KMemoryState_FlagCanUseNonSecureIpc; + test_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + break; + case KMemoryState_NonDeviceIpc: + test_state = KMemoryState_FlagCanUseNonDeviceIpc; + test_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + break; + default: + return svc::ResultInvalidCombination(); + } + + /* Ensure that on failure, we roll back appropriately. */ + size_t mapped_size = 0; + auto unmap_guard = SCOPE_GUARD { + if (mapped_size > 0) { + /* Determine where the mapping ends. */ + const auto mapped_end = GetInteger(mapping_src_start) + mapped_size; + const auto mapped_last = mapped_end - 1; + + KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(mapping_src_start); + while (true) { + const KMemoryInfo info = it->GetMemoryInfo(); + + const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) ? info.GetAddress() : GetInteger(mapping_src_start); + const auto cur_end = mapped_last <= info.GetLastAddress() ? mapped_end : info.GetEndAddress(); + const size_t cur_size = cur_end - cur_start; + + /* Fix the permissions, if we need to. */ + if ((info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) { + const KPageProperties properties = { info.GetPermission(), false, false, false }; + MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, cur_start, cur_size / PageSize, Null, false, properties, OperationType_ChangePermissions, true)); + } + + /* If the block is at the end, we're done. */ + if (mapped_last <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + ++it; + MESOSPHERE_ABORT_UNLESS(it != this->memory_block_manager.end()); + } + } + }; + + /* Iterate, mapping as needed. */ + KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(aligned_src_start); + while (true) { + const KMemoryInfo info = it->GetMemoryInfo(); + + /* Validate the current block. */ + R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None)); + + if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < info.GetEndAddress() && info.GetAddress() < GetInteger(mapping_src_end)) { + const auto cur_start = info.GetAddress() >= GetInteger(mapping_src_start) ? info.GetAddress() : GetInteger(mapping_src_start); + const auto cur_end = mapping_src_last <= info.GetLastAddress() ? GetInteger(mapping_src_end) : info.GetEndAddress(); + const size_t cur_size = cur_end - cur_start; + + /* Set the permissions on the block, if we need to. */ + if ((info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) { + const KPageProperties properties = { src_perm, false, false, false }; + R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, Null, false, properties, OperationType_ChangePermissions, false)); + } + + /* Note that we mapped this part. */ + mapped_size += cur_size; + } + + /* If the block is at the end, we're done. */ + if (aligned_src_last <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + ++it; + MESOSPHERE_ABORT_UNLESS(it != this->memory_block_manager.end()); + } + + /* We succeeded, so no need to unmap. 
*/ + unmap_guard.Cancel(); + + return ResultSuccess(); + } + + Result KPageTableBase::SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(src_page_table.IsLockedByCurrentThread()); + + /* Check that we can theoretically map. */ + const KProcessAddress region_start = this->alias_region_start; + const size_t region_size = this->alias_region_end - this->alias_region_start; + R_UNLESS(size < region_size, svc::ResultOutOfAddressSpace()); + + /* Get aligned source extents. */ + const KProcessAddress src_start = src_addr; + const KProcessAddress src_end = src_addr + size; + const KProcessAddress aligned_src_start = util::AlignDown(GetInteger(src_start), PageSize); + const KProcessAddress aligned_src_end = util::AlignUp(GetInteger(src_start) + size, PageSize); + const KProcessAddress mapping_src_start = util::AlignUp(GetInteger(src_start), PageSize); + const KProcessAddress mapping_src_end = util::AlignDown(GetInteger(src_start) + size, PageSize); + const size_t aligned_src_size = aligned_src_end - aligned_src_start; + const size_t mapping_src_size = (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0; + + /* Select a random address to map at. */ + KProcessAddress dst_addr = Null; + for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) { + const size_t alignment = KPageTable::GetBlockSize(static_cast(block_type)); + const size_t offset = GetInteger(aligned_src_start) & (alignment - 1); + + dst_addr = this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, alignment, offset, this->GetNumGuardPages()); + if (dst_addr != Null) { + break; + } + } + R_UNLESS(dst_addr != Null, svc::ResultOutOfAddressSpace()); + + /* Check that we can perform the operation we're about to perform. */ + MESOSPHERE_ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Reserve space for any partial pages we allocate. */ + const size_t unmapped_size = aligned_src_size - mapping_src_size; + KScopedResourceReservation memory_reservation(GetCurrentProcess().GetResourceLimit(), ams::svc::LimitableResource_PhysicalMemoryMax, unmapped_size); + R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Ensure that we we clean up on failure. 
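The SCOPE_GUARD used here registers rollback work (freeing partial pages, unmapping whatever was already mapped) that runs on every early return, and is cancelled once the function reaches its success path. A minimal RAII sketch of that pattern, using a hypothetical ScopeGuard helper rather than the kernel's macro:

```cpp
#include <cstdio>
#include <utility>

// Minimal scope guard: runs the stored callable on destruction unless cancelled.
template <typename F>
class ScopeGuard {
    public:
        explicit ScopeGuard(F f) : f(std::move(f)) {}
        ~ScopeGuard() { if (this->active) { this->f(); } }
        void Cancel() { this->active = false; }

        ScopeGuard(const ScopeGuard &) = delete;
        ScopeGuard &operator=(const ScopeGuard &) = delete;
    private:
        F f;
        bool active = true;
};

bool SetupMapping(bool fail_partway) {
    int mapped_pages = 0;

    // Register rollback work before doing anything that can fail.
    ScopeGuard cleanup_guard([&] {
        if (mapped_pages > 0) {
            std::printf("rolling back %d mapped page(s)\n", mapped_pages);
        }
    });

    mapped_pages = 2;
    if (fail_partway) {
        return false;           // Early return: the guard undoes what we mapped.
    }

    cleanup_guard.Cancel();     // Success: keep the mapping, skip the rollback.
    return true;
}

int main() {
    SetupMapping(true);
    SetupMapping(false);
}
```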
*/ + KVirtualAddress start_partial_page = Null; + KVirtualAddress end_partial_page = Null; + KProcessAddress cur_mapped_addr = dst_addr; + + auto cleanup_guard = SCOPE_GUARD { + if (start_partial_page != Null) { + Kernel::GetMemoryManager().Open(start_partial_page, 1); + Kernel::GetMemoryManager().Close(start_partial_page, 1); + } + if (end_partial_page != Null) { + Kernel::GetMemoryManager().Open(end_partial_page, 1); + Kernel::GetMemoryManager().Close(end_partial_page, 1); + } + if (cur_mapped_addr != dst_addr) { + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; + MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_addr, (cur_mapped_addr - dst_addr) / PageSize, Null, false, unmap_properties, OperationType_Unmap, true)); + } + }; + + /* Allocate the start page as needed. */ + if (aligned_src_start < mapping_src_start) { + start_partial_page = Kernel::GetMemoryManager().AllocateContinuous(1, 0, this->allocate_option); + R_UNLESS(start_partial_page != Null, svc::ResultOutOfMemory()); + } + + /* Allocate the end page as needed. */ + if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) { + end_partial_page = Kernel::GetMemoryManager().AllocateContinuous(1, 0, this->allocate_option); + R_UNLESS(end_partial_page != Null, svc::ResultOutOfMemory()); + } + + /* Get the implementation. */ + auto &src_impl = src_page_table.GetImpl(); + + /* Get the page properties for any mapping we'll be doing. */ + const KPageProperties dst_map_properties = { test_perm, false, false, false }; + + /* Get the fill value for partial pages. */ + const auto fill_val = this->ipc_fill_value; + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry next_entry; + bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), aligned_src_start); + MESOSPHERE_ASSERT(traverse_valid); + + /* Prepare tracking variables. */ + KPhysicalAddress cur_block_addr = next_entry.phys_addr; + size_t cur_block_size = next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1)); + size_t tot_block_size = cur_block_size; + + /* Map the start page, if we have one. */ + if (start_partial_page != Null) { + /* Ensure the page holds correct data. */ + if (send) { + const size_t partial_offset = src_start - aligned_src_start; + size_t copy_size, clear_size; + if (src_end < mapping_src_start) { + copy_size = size; + clear_size = mapping_src_start - src_end; + } else { + copy_size = mapping_src_start - src_start; + clear_size = 0; + } + + std::memset(GetVoidPointer(start_partial_page), fill_val, partial_offset); + std::memcpy(GetVoidPointer(start_partial_page + partial_offset), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr) + partial_offset), copy_size); + if (clear_size > 0) { + std::memset(GetVoidPointer(start_partial_page + partial_offset + copy_size), fill_val, clear_size); + } + } else { + std::memset(GetVoidPointer(start_partial_page), fill_val, PageSize); + } + + /* Map the page. */ + R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, GetHeapPhysicalAddress(start_partial_page), true, dst_map_properties, OperationType_Map, false)); + + /* Update tracking extents. */ + cur_mapped_addr += PageSize; + cur_block_addr += PageSize; + cur_block_size -= PageSize; + + /* If the block's size was one page, we may need to continue traversal. 
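For a send buffer whose start is not page aligned, the freshly allocated partial page is filled with the IPC fill value outside the client's range and with the client's bytes inside it, so the server never sees unrelated data that happens to share the page. A standalone sketch of how such a start page could be assembled (hypothetical sizes, plain memcpy in place of the heap-linear accessors):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

constexpr size_t  PageSize  = 0x1000;
constexpr uint8_t FillValue = 0xAA;   // Stand-in for the table's IPC fill value.

// Build the first (partial) page of a send mapping. 'offset' is where the client
// buffer begins within the page, and 'copy_size' is how many client bytes land in it.
void BuildStartPartialPage(uint8_t *page, const uint8_t *client_data, size_t offset, size_t copy_size) {
    std::memset(page, FillValue, offset);                                             // Before the buffer: fill.
    std::memcpy(page + offset, client_data, copy_size);                               // The client's bytes.
    std::memset(page + offset + copy_size, FillValue, PageSize - offset - copy_size); // After the buffer: fill.
}

int main() {
    static uint8_t page[PageSize];
    static uint8_t client[PageSize];
    std::memset(client, 0x5A, sizeof(client));

    // Client buffer starts 0x800 bytes into the page and fills the rest of it.
    BuildStartPartialPage(page, client, 0x800, PageSize - 0x800);
    std::printf("%#x %#x\n", page[0x7FF], page[0x800]);   // 0xaa 0x5a
}
```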
*/ + if (cur_block_size == 0 && aligned_src_size > PageSize) { + traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + cur_block_addr = next_entry.phys_addr; + cur_block_size = next_entry.block_size; + tot_block_size += next_entry.block_size; + } + } + + /* Map the remaining pages. */ + while (aligned_src_start + tot_block_size < mapping_src_end) { + /* Continue the traversal. */ + traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + /* Process the block. */ + if (next_entry.phys_addr != cur_block_addr + cur_block_size) { + /* Map the block we've been processing so far. */ + R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize, cur_block_addr, true, dst_map_properties, OperationType_Map, false)); + + /* Update tracking extents. */ + cur_mapped_addr += cur_block_size; + cur_block_addr = next_entry.phys_addr; + cur_block_size = next_entry.block_size; + } else { + cur_block_size += next_entry.block_size; + } + tot_block_size += next_entry.block_size; + } + + /* Handle the last direct-mapped page. */ + if (const KProcessAddress mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; mapped_block_end < mapping_src_end) { + const size_t last_block_size = mapping_src_end - mapped_block_end; + + /* Map the last block. */ + R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize, cur_block_addr, true, dst_map_properties, OperationType_Map, false)); + + /* Update tracking extents. */ + cur_mapped_addr += last_block_size; + cur_block_addr += last_block_size; + if (mapped_block_end + cur_block_size < aligned_src_end && cur_block_size == last_block_size) { + traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + MESOSPHERE_ASSERT(traverse_valid); + + cur_block_addr = next_entry.phys_addr; + } + } + + /* Map the end page, if we have one. */ + if (end_partial_page != Null) { + /* Ensure the page holds correct data. */ + if (send) { + const size_t copy_size = src_end - mapping_src_end; + std::memcpy(GetVoidPointer(end_partial_page), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr)), copy_size); + std::memset(GetVoidPointer(end_partial_page + copy_size), fill_val, PageSize - copy_size); + } else { + std::memset(GetVoidPointer(end_partial_page), fill_val, PageSize); + } + + /* Map the page. */ + R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, GetHeapPhysicalAddress(end_partial_page), true, dst_map_properties, OperationType_Map, false)); + } + + /* Update memory blocks to reflect our changes */ + this->memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, dst_state, test_perm, KMemoryAttribute_None); + + /* Set the output address. */ + *out_addr = dst_addr + (src_start - aligned_src_start); + + /* We succeeded. */ + cleanup_guard.Cancel(); + memory_reservation.Commit(); + return ResultSuccess(); + } + + Result KPageTableBase::SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KPageTableBase &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) { + /* For convenience, alias this. */ + KPageTableBase &dst_page_table = *this; + + /* Get the table locks. */ + KLightLock &lock_0 = (reinterpret_cast(std::addressof(src_page_table)) <= reinterpret_cast(std::addressof(dst_page_table))) ? 
src_page_table.general_lock : dst_page_table.general_lock; + KLightLock &lock_1 = (reinterpret_cast(std::addressof(src_page_table)) <= reinterpret_cast(std::addressof(dst_page_table))) ? dst_page_table.general_lock : src_page_table.general_lock; + + /* Lock the first lock. */ + KScopedLightLock lk0(lock_0); + + /* If necessary, lock the second lock. */ + std::optional lk1; + if (std::addressof(lock_0) != std::addressof(lock_1)) { + lk1.emplace(lock_1); + } + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(src_page_table.memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(std::addressof(src_page_table)); + + /* Perform client setup. */ + R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), src_addr, size, test_perm, dst_state)); + + /* Ensure that we clean up appropriately if we fail after this. */ + auto cleanup_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(src_page_table.CleanupForIpcClientOnServerSetupFailure(updater.GetPageList(), src_addr, size, test_perm)); }; + + /* Perform server setup. */ + R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, src_page_table, send)); + + /* Get the mapped extents. */ + const KProcessAddress src_map_start = util::AlignUp(GetInteger(src_addr), PageSize); + const KProcessAddress src_map_end = util::AlignDown(GetInteger(src_addr) + size, PageSize); + + /* If anything was mapped, ipc-lock the pages. */ + if (src_map_start < src_map_end) { + /* Get the source permission. */ + const auto src_perm = static_cast((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead); + src_page_table.memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, (src_map_end - src_map_start) / PageSize, &KMemoryBlock::LockForIpc, src_perm); + } + + /* We succeeded, so cancel our cleanup guard. */ + cleanup_guard.Cancel(); + + return ResultSuccess(); + } + + Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state, KProcess *server_process) { + /* Validate the address. */ + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Validate the memory state. */ + R_TRY(this->CheckMemoryState(address, size, KMemoryState_All, dst_state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Get aligned extents. */ + const KProcessAddress aligned_start = util::AlignDown(GetInteger(address), PageSize); + const KProcessAddress aligned_end = util::AlignUp(GetInteger(address) + size, PageSize); + const size_t aligned_size = aligned_end - aligned_start; + const size_t aligned_num_pages = aligned_size / PageSize; + + /* Unmap the pages. */ + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, Null, false, unmap_properties, OperationType_Unmap, false)); + + /* Update memory blocks. 
*/ + this->memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None); + + /* Release from the resource limit as relevant. */ + if (auto *resource_limit = server_process->GetResourceLimit(); resource_limit != nullptr) { + const KProcessAddress mapping_start = util::AlignUp(GetInteger(address), PageSize); + const KProcessAddress mapping_end = util::AlignDown(GetInteger(address) + size, PageSize); + const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0; + resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_size - mapping_size); + } + + return ResultSuccess(); + } + + Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) { + /* Validate the address. */ + R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Get aligned source extents. */ + const KProcessAddress mapping_start = util::AlignUp(GetInteger(address), PageSize); + const KProcessAddress mapping_end = util::AlignDown(GetInteger(address) + size, PageSize); + const KProcessAddress mapping_last = mapping_end - 1; + const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0; + + /* If nothing was mapped, we're actually done immediately. */ + R_SUCCEED_IF(mapping_size == 0); + + /* Get the test state and attribute mask. */ + u32 test_state; + u32 test_attr_mask; + switch (dst_state) { + case KMemoryState_Ipc: + test_state = KMemoryState_FlagCanUseIpc; + test_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked; + break; + case KMemoryState_NonSecureIpc: + test_state = KMemoryState_FlagCanUseNonSecureIpc; + test_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + break; + case KMemoryState_NonDeviceIpc: + test_state = KMemoryState_FlagCanUseNonDeviceIpc; + test_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + break; + default: + return svc::ResultInvalidCombination(); + } + + /* Lock the table. */ + /* NOTE: Nintendo does this *after* creating the updater below, but this does not follow convention elsewhere in KPageTableBase. */ + KScopedLightLock lk(this->general_lock); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Ensure that on failure, we roll back appropriately. */ + size_t mapped_size = 0; + auto unmap_guard = SCOPE_GUARD { + if (mapped_size > 0) { + /* Determine where the mapping ends. */ + const auto mapped_end = GetInteger(mapping_start) + mapped_size; + const auto mapped_last = mapped_end - 1; + + KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(mapping_start); + while (true) { + const KMemoryInfo info = it->GetMemoryInfo(); + + const auto cur_start = info.GetAddress() >= GetInteger(mapping_start) ? info.GetAddress() : GetInteger(mapping_start); + const auto cur_end = mapped_last <= info.GetLastAddress() ? mapped_end : info.GetEndAddress(); + const size_t cur_size = cur_end - cur_start; + + /* Fix the permissions, if we need to. 
*/ + if (info.GetIpcLockCount() == 1 && (info.GetPermission() != info.GetOriginalPermission())) { + const KPageProperties properties = { info.GetPermission(), false, false, false }; + MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_start, cur_size / PageSize, Null, false, properties, OperationType_ChangePermissions, true)); + } + + /* If the block is at the end, we're done. */ + if (mapped_last <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + ++it; + MESOSPHERE_ABORT_UNLESS(it != this->memory_block_manager.end()); + } + } + }; + + /* Iterate, reprotecting as needed. */ + KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(mapping_start); + while (true) { + const KMemoryInfo info = it->GetMemoryInfo(); + + /* Validate the current block. */ + R_TRY(this->CheckMemoryState(info, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked)); + + const auto cur_start = info.GetAddress() >= GetInteger(mapping_start) ? info.GetAddress() : GetInteger(mapping_start); + const auto cur_end = mapping_last <= info.GetLastAddress() ? GetInteger(mapping_end) : info.GetEndAddress(); + const size_t cur_size = cur_end - cur_start; + + /* Set the permissions on the block, if we need to. */ + if (info.GetIpcLockCount() == 1 && (info.GetPermission() != info.GetOriginalPermission())) { + const KPageProperties properties = { info.GetOriginalPermission(), false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), cur_start, cur_size / PageSize, Null, false, properties, OperationType_ChangePermissions, false)); + } + + /* Mark that we mapped the block. */ + mapped_size += cur_size; + + /* If the block is at the end, we're done. */ + if (mapping_last <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + ++it; + MESOSPHERE_ABORT_UNLESS(it != this->memory_block_manager.end()); + } + + /* Unlock the pages. */ + this->memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, KMemoryPermission_None); + + /* We succeeded, so no need to unmap. */ + unmap_guard.Cancel(); + + return ResultSuccess(); + } + + Result KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission src_perm) { + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + + /* Get the mapped extents. */ + const KProcessAddress src_map_start = util::AlignUp(GetInteger(address), PageSize); + const KProcessAddress src_map_end = util::AlignDown(GetInteger(address) + size, PageSize); + const KProcessAddress src_map_last = src_map_end - 1; + + /* If nothing was reprotected, there's no cleanup to do. */ + R_SUCCEED_IF(src_map_start >= src_map_end); + + /* Get the permission to check against. */ + const auto prot_perm = (src_perm == KMemoryPermission_UserReadWrite ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead); + + /* Iterate over blocks, fixing permissions. */ + KMemoryBlockManager::const_iterator it = this->memory_block_manager.FindIterator(address); + while (true) { + const KMemoryInfo info = it->GetMemoryInfo(); + + const auto cur_start = info.GetAddress() >= GetInteger(src_map_start) ? info.GetAddress() : GetInteger(src_map_start); + const auto cur_end = src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress(); + + /* If we can, fix the protections on the block. 
*/ + if (info.GetIpcLockCount() == 0 && (info.GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) { + const KPageProperties properties = { src_perm, false, false, false }; + R_TRY(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, Null, false, properties, OperationType_ChangePermissions, true)); + } + + /* If we're past the end of the region, we're done. */ + if (src_map_last <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + ++it; + MESOSPHERE_ABORT_UNLESS(it != this->memory_block_manager.end()); + } + + return ResultSuccess(); + } + + Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) { + /* Lock the physical memory lock. */ + KScopedLightLock phys_lk(this->map_physical_memory_lock); + + /* Calculate the last address for convenience. */ + const KProcessAddress last_address = address + size - 1; + + /* Define iteration variables. */ + KProcessAddress cur_address; + size_t mapped_size; + + /* The entire mapping process can be retried. */ + while (true) { + /* Check if the memory is already mapped. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + + /* Iterate over the memory. */ + cur_address = address; + mapped_size = 0; + + auto it = this->memory_block_manager.FindIterator(cur_address); + while (true) { + /* Check that the iterator is valid. */ + MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + + /* Get the memory info. */ + const KMemoryInfo info = it->GetMemoryInfo(); + + /* Check if we're done. */ + if (last_address <= info.GetLastAddress()) { + if (info.GetState() != KMemoryState_Free) { + mapped_size += (last_address + 1 - cur_address); + } + break; + } + + /* Track the memory if it's mapped. */ + if (info.GetState() != KMemoryState_Free) { + mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; + } + + /* Advance. */ + cur_address = info.GetEndAddress(); + ++it; + } + + /* If the size mapped is the size requested, we've nothing to do. */ + R_SUCCEED_IF(size == mapped_size); + } + + /* Allocate and map the memory. */ + { + /* Reserve the memory from the process resource limit. */ + KScopedResourceReservation memory_reservation(GetCurrentProcess().GetResourceLimit(), ams::svc::LimitableResource_PhysicalMemoryMax, size - mapped_size); + R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Allocate pages for the new memory. */ + KPageGroup pg(this->block_info_manager); + R_TRY(Kernel::GetMemoryManager().AllocateForProcess(std::addressof(pg), (size - mapped_size) / PageSize, this->allocate_option, GetCurrentProcess().GetId(), this->heap_fill_value)); + + /* Open a reference to the pages we allocated, and close our reference when we're done. */ + pg.Open(); + ON_SCOPE_EXIT { pg.Close(); }; + + /* Map the memory. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Verify that nobody has mapped memory since we first checked. */ + { + /* Iterate over the memory. */ + size_t checked_mapped_size = 0; + cur_address = address; + + auto it = this->memory_block_manager.FindIterator(cur_address); + while (true) { + /* Check that the iterator is valid. */ + MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + + /* Get the memory info. */ + const KMemoryInfo info = it->GetMemoryInfo(); + + /* Check if we're done. 
*/ + if (last_address <= info.GetLastAddress()) { + if (info.GetState() != KMemoryState_Free) { + checked_mapped_size += (last_address + 1 - cur_address); + } + break; + } + + /* Track the memory if it's mapped. */ + if (info.GetState() != KMemoryState_Free) { + checked_mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; + } + + /* Advance. */ + cur_address = info.GetEndAddress(); + ++it; + } + + /* If the size now isn't what it was before, somebody mapped or unmapped concurrently. */ + /* If this happened, retry. */ + if (mapped_size != checked_mapped_size) { + continue; + } + } + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Reset the current tracking address, and make sure we clean up on failure. */ + cur_address = address; + auto unmap_guard = SCOPE_GUARD { + if (cur_address > address) { + const KProcessAddress last_unmap_address = cur_address - 1; + + /* Iterate, unmapping the pages. */ + cur_address = address; + + auto it = this->memory_block_manager.FindIterator(cur_address); + while (true) { + /* Check that the iterator is valid. */ + MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + + /* Get the memory info. */ + const KMemoryInfo info = it->GetMemoryInfo(); + + /* If the memory state is free, we mapped it and need to unmap it. */ + if (info.GetState() == KMemoryState_Free) { + /* Determine the range to unmap. */ + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; + const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_unmap_address + 1 - cur_address) / PageSize; + + /* Unmap. */ + MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null, false, unmap_properties, OperationType_Unmap, true)); + } + + /* Check if we're done. */ + if (last_unmap_address <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + cur_address = info.GetEndAddress(); + ++it; + } + } + }; + + /* Iterate over the memory. */ + auto pg_it = pg.begin(); + KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + size_t pg_pages = pg_it->GetNumPages(); + + auto it = this->memory_block_manager.FindIterator(cur_address); + while (true) { + /* Check that the iterator is valid. */ + MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + + /* Get the memory info. */ + const KMemoryInfo info = it->GetMemoryInfo(); + + /* If it's unmapped, we need to map it. */ + if (info.GetState() == KMemoryState_Free) { + /* Determine the range to map. */ + const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, false }; + size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize; + + /* While we have pages to map, map them. */ + while (map_pages > 0) { + /* Check if we're at the end of the physical block. */ + if (pg_pages == 0) { + /* Ensure there are more pages to map. */ + MESOSPHERE_ASSERT(pg_it != pg.end()); + + /* Advance our physical block. */ + ++pg_it; + pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + pg_pages = pg_it->GetNumPages(); + } + + /* Map whatever we can. 
*/ + const size_t cur_pages = std::min(pg_pages, map_pages); + R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, map_properties, OperationType_Map, false)); + + /* Advance. */ + cur_address += cur_pages * PageSize; + map_pages -= cur_pages; + + pg_phys_addr += cur_pages * PageSize; + pg_pages -= cur_pages; + } + } + + /* Check if we're done. */ + if (last_address <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + cur_address = info.GetEndAddress(); + ++it; + } + + /* We succeeded, so commit the memory reservation. */ + memory_reservation.Commit(); + + /* Increase our tracked mapped size. */ + this->mapped_physical_memory_size += (size - mapped_size); + + /* Update the relevant memory blocks. */ + this->memory_block_manager.UpdateIfMatch(std::addressof(allocator), address, size / PageSize, + KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, + KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None); + + /* Cancel our guard. */ + unmap_guard.Cancel(); + + return ResultSuccess(); + } + } + } + } + + Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) { + /* Lock the physical memory lock. */ + KScopedLightLock phys_lk(this->map_physical_memory_lock); + + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Calculate the last address for convenience. */ + const KProcessAddress last_address = address + size - 1; + + /* Define iteration variables. */ + KProcessAddress cur_address; + size_t mapped_size; + + /* Check if the memory is mapped. */ + { + /* Iterate over the memory. */ + cur_address = address; + mapped_size = 0; + + auto it = this->memory_block_manager.FindIterator(cur_address); + while (true) { + /* Check that the iterator is valid. */ + MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + + /* Get the memory info. */ + const KMemoryInfo info = it->GetMemoryInfo(); + + /* Verify the memory's state. */ + const bool is_normal = info.GetState() == KMemoryState_Normal && info.GetAttribute() == 0; + const bool is_free = info.GetState() == KMemoryState_Free; + R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory()); + + /* Check if we're done. */ + if (last_address <= info.GetLastAddress()) { + if (is_normal) { + mapped_size += (last_address + 1 - cur_address); + } + break; + } + + /* Track the memory if it's mapped. */ + if (is_normal) { + mapped_size += KProcessAddress(info.GetEndAddress()) - cur_address; + } + + /* Advance. */ + cur_address = info.GetEndAddress(); + ++it; + } + + /* If there's nothing mapped, we've nothing to do. */ + R_SUCCEED_IF(mapped_size == 0); + } + + /* Make a page group for the unmap region. */ + KPageGroup pg(this->block_info_manager); + { + auto &impl = this->GetImpl(); + + /* Begin traversal. */ + TraversalContext context; + TraversalEntry cur_entry = {}; + bool cur_valid = false; + TraversalEntry next_entry; + bool next_valid; + size_t tot_size = 0; + + cur_address = address; + next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_address); + next_entry.block_size = (next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1))); + + /* Iterate, building the group. 
*/ + while (true) { + if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) { + cur_entry.block_size += next_entry.block_size; + } else { + if (cur_valid) { + MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr)); + R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), cur_entry.block_size / PageSize)); + } + + /* Update tracking variables. */ + tot_size += cur_entry.block_size; + cur_entry = next_entry; + cur_valid = next_valid; + } + + if (cur_entry.block_size + tot_size >= size) { + break; + } + + next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)); + } + + /* Add the last block. */ + if (cur_valid) { + MESOSPHERE_ABORT_UNLESS(IsHeapPhysicalAddress(cur_entry.phys_addr)); + R_TRY(pg.AddBlock(GetHeapVirtualAddress(cur_entry.phys_addr), (size - tot_size) / PageSize)); + } + } + MESOSPHERE_ASSERT(pg.GetNumPages() == mapped_size / PageSize); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Open a reference to the pages, we're unmapping, and close the reference when we're done. */ + pg.Open(); + ON_SCOPE_EXIT { pg.Close(); }; + + /* Reset the current tracking address, and make sure we clean up on failure. */ + cur_address = address; + auto remap_guard = SCOPE_GUARD { + if (cur_address > address) { + const KProcessAddress last_map_address = cur_address - 1; + cur_address = address; + + /* Iterate over the memory we unmapped. */ + auto it = this->memory_block_manager.FindIterator(cur_address); + auto pg_it = pg.begin(); + KPhysicalAddress pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + size_t pg_pages = pg_it->GetNumPages(); + + while (true) { + /* Get the memory info for the pages we unmapped, convert to property. */ + const KMemoryInfo info = it->GetMemoryInfo(); + const KPageProperties prev_properties = { info.GetPermission(), false, false, false }; + + /* If the memory is normal, we unmapped it and need to re-map it. */ + if (info.GetState() == KMemoryState_Normal) { + /* Determine the range to map. */ + size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_map_address + 1 - cur_address) / PageSize; + + /* While we have pages to map, map them. */ + while (map_pages > 0) { + /* Check if we're at the end of the physical block. */ + if (pg_pages == 0) { + /* Ensure there are more pages to map. */ + MESOSPHERE_ABORT_UNLESS(pg_it != pg.end()); + + /* Advance our physical block. */ + ++pg_it; + pg_phys_addr = GetHeapPhysicalAddress(pg_it->GetAddress()); + pg_pages = pg_it->GetNumPages(); + } + + /* Map whatever we can. */ + const size_t cur_pages = std::min(pg_pages, map_pages); + MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, prev_properties, OperationType_Map, true)); + + /* Advance. */ + cur_address += cur_pages * PageSize; + map_pages -= cur_pages; + + pg_phys_addr += cur_pages * PageSize; + pg_pages -= cur_pages; + } + } + + /* Check if we're done. */ + if (last_map_address <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + ++it; + } + } + }; + + /* Iterate over the memory, unmapping as we go. */ + auto it = this->memory_block_manager.FindIterator(cur_address); + while (true) { + /* Check that the iterator is valid. 
*/ + MESOSPHERE_ASSERT(it != this->memory_block_manager.end()); + + /* Get the memory info. */ + const KMemoryInfo info = it->GetMemoryInfo(); + + /* If the memory state is normal, we need to unmap it. */ + if (info.GetState() == KMemoryState_Normal) { + /* Determine the range to unmap. */ + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; + const size_t cur_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize; + + /* Unmap. */ + R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null, false, unmap_properties, OperationType_Unmap, false)); + } + + /* Check if we're done. */ + if (last_address <= info.GetLastAddress()) { + break; + } + + /* Advance. */ + cur_address = info.GetEndAddress(); + ++it; + } + + /* Release the memory resource. */ + this->mapped_physical_memory_size -= mapped_size; + GetCurrentProcess().ReleaseResource(ams::svc::LimitableResource_PhysicalMemoryMax, mapped_size); + + /* Update memory blocks. */ + this->memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None); + + /* We succeeded. */ + remap_guard.Cancel(); + return ResultSuccess(); + } + + Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + /* Try to reserve the unsafe memory. */ + R_UNLESS(Kernel::GetUnsafeMemory().TryReserve(size), svc::ResultLimitReached()); + + /* Ensure we release our reservation on failure. */ + auto reserve_guard = SCOPE_GUARD { Kernel::GetUnsafeMemory().Release(size); }; + + /* Create a page group for the new memory. */ + KPageGroup pg(this->block_info_manager); + + /* Allocate the new memory. */ + const size_t num_pages = size / PageSize; + R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront))); + + /* Open the page group, and close it when we're done with it. */ + pg.Open(); + ON_SCOPE_EXIT { pg.Close(); }; + + /* Clear the new memory. */ + for (const auto &block : pg) { + std::memset(GetVoidPointer(block.GetAddress()), this->heap_fill_value, block.GetSize()); + } + + /* Map the new memory. */ + { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryState(address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Map the pages. */ + const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, OperationType_MapGroup, false)); + + /* Apply the memory block update. */ + this->memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None); + + /* Update our mapped unsafe size. */ + this->mapped_unsafe_physical_memory += size; + + /* We succeeded. 
*/ + reserve_guard.Cancel(); + return ResultSuccess(); + } + } + + Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) { + /* Lock the table. */ + KScopedLightLock lk(this->general_lock); + + /* Check whether we can unmap this much unsafe physical memory. */ + R_UNLESS(size <= this->mapped_unsafe_physical_memory, svc::ResultInvalidCurrentMemory()); + + /* Check the memory state. */ + R_TRY(this->CheckMemoryState(address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Create an update allocator. */ + KMemoryBlockManagerUpdateAllocator allocator(this->memory_block_slab_manager); + R_TRY(allocator.GetResult()); + + /* We're going to perform an update, so create a helper. */ + KScopedPageTableUpdater updater(this); + + /* Unmap the memory. */ + const size_t num_pages = size / PageSize; + const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, false }; + R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null, false, unmap_properties, OperationType_Unmap, false)); + + /* Apply the memory block update. */ + this->memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None); + + /* Release the unsafe memory from the limit. */ + Kernel::GetUnsafeMemory().Release(size); + + /* Update our mapped unsafe size. */ + this->mapped_unsafe_physical_memory -= size; + + return ResultSuccess(); + } + } diff --git a/libraries/libmesosphere/source/kern_k_port.cpp b/libraries/libmesosphere/source/kern_k_port.cpp index 9b081ed3c..a343a1887 100644 --- a/libraries/libmesosphere/source/kern_k_port.cpp +++ b/libraries/libmesosphere/source/kern_k_port.cpp @@ -34,11 +34,41 @@ namespace ams::kern { } void KPort::OnClientClosed() { - MESOSPHERE_UNIMPLEMENTED(); + MESOSPHERE_ASSERT_THIS(); + + KScopedSchedulerLock sl; + + if (this->state == State::Normal) { + this->state = State::ClientClosed; + } } void KPort::OnServerClosed() { - MESOSPHERE_UNIMPLEMENTED(); + MESOSPHERE_ASSERT_THIS(); + + KScopedSchedulerLock sl; + + if (this->state == State::Normal) { + this->state = State::ServerClosed; + } + } + + Result KPort::EnqueueSession(KServerSession *session) { + KScopedSchedulerLock sl; + + R_UNLESS(this->state == State::Normal, svc::ResultPortClosed()); + + this->server.EnqueueSession(session); + return ResultSuccess(); + } + + Result KPort::EnqueueSession(KLightServerSession *session) { + KScopedSchedulerLock sl; + + R_UNLESS(this->state == State::Normal, svc::ResultPortClosed()); + + this->server.EnqueueSession(session); + return ResultSuccess(); } } diff --git a/libraries/libmesosphere/source/kern_k_process.cpp b/libraries/libmesosphere/source/kern_k_process.cpp index c3dc4320b..051379f32 100644 --- a/libraries/libmesosphere/source/kern_k_process.cpp +++ b/libraries/libmesosphere/source/kern_k_process.cpp @@ -21,12 +21,128 @@ namespace ams::kern { constexpr u64 InitialProcessIdMin = 1; constexpr u64 InitialProcessIdMax = 0x50; + + constexpr u64 ProcessIdMin = InitialProcessIdMax + 1; + constexpr u64 ProcessIdMax = std::numeric_limits::max(); + std::atomic g_initial_process_id = InitialProcessIdMin; + std::atomic g_process_id = ProcessIdMin; + + void TerminateChildren(KProcess *process, const KThread *thread_to_not_terminate) { + /* Request that all children threads terminate. 
*/ + { + KScopedLightLock proc_lk(process->GetListLock()); + KScopedSchedulerLock sl; + + auto &thread_list = process->GetThreadList(); + for (auto it = thread_list.begin(); it != thread_list.end(); ++it) { + if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) { + if (thread->GetState() != KThread::ThreadState_Terminated) { + thread->RequestTerminate(); + } + } + } + } + + /* Wait for all children threads to terminate.*/ + while (true) { + /* Get the next child. */ + KThread *cur_child = nullptr; + { + KScopedLightLock proc_lk(process->GetListLock()); + + auto &thread_list = process->GetThreadList(); + for (auto it = thread_list.begin(); it != thread_list.end(); ++it) { + if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) { + if (thread->GetState() != KThread::ThreadState_Terminated) { + if (AMS_LIKELY(thread->Open())) { + cur_child = thread; + break; + } + } + } + } + } + + /* If we didn't find any non-terminated children, we're done. */ + if (cur_child == nullptr) { + break; + } + + /* Terminate and close the thread. */ + cur_child->Terminate(); + cur_child->Close(); + } + } } void KProcess::Finalize() { - MESOSPHERE_UNIMPLEMENTED(); + /* Ensure we're not executing on any core. */ + for (size_t i = 0; i < cpu::NumCores; ++i) { + MESOSPHERE_ASSERT(Kernel::GetCurrentContext(static_cast(i)).current_process.load(std::memory_order_relaxed) != this); + } + + /* Delete the process local region. */ + this->DeleteThreadLocalRegion(this->plr_address); + + /* Get the used memory size. */ + const size_t used_memory_size = this->GetUsedUserPhysicalMemorySize(); + + /* Finalize the page table. */ + this->page_table.Finalize(); + + /* Free the system resource. */ + if (this->system_resource_address != Null) { + /* Check that we have no outstanding allocations. */ + MESOSPHERE_ABORT_UNLESS(this->memory_block_slab_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(this->block_info_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(this->page_table_manager.GetUsed() == 0); + + /* Free the memory. */ + KSystemControl::FreeSecureMemory(this->system_resource_address, this->system_resource_num_pages * PageSize, this->memory_pool); + + /* Clear our tracking variables. */ + this->system_resource_address = Null; + this->system_resource_num_pages = 0; + + /* Finalize optimized memory. If memory wasn't optimized, this is a no-op. */ + Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), this->memory_pool); + } + + /* Release memory to the resource limit. */ + if (this->resource_limit != nullptr) { + MESOSPHERE_ABORT_UNLESS(used_memory_size >= this->memory_release_hint); + this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - this->memory_release_hint); + this->resource_limit->Close(); + } + + /* Free all shared memory infos. */ + { + auto it = this->shared_memory_list.begin(); + while (it != this->shared_memory_list.end()) { + KSharedMemoryInfo *info = std::addressof(*it); + KSharedMemory *shmem = info->GetSharedMemory(); + + while (!info->Close()) { + shmem->Close(); + } + shmem->Close(); + + it = this->shared_memory_list.erase(it); + KSharedMemoryInfo::Free(info); + } + } + + /* Our thread local page list must be empty at this point. */ + MESOSPHERE_ABORT_UNLESS(this->partially_used_tlp_tree.empty()); + MESOSPHERE_ABORT_UNLESS(this->fully_used_tlp_tree.empty()); + + /* Log that we finalized for debug. 
*/ + MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", this->process_id, this->name); + + /* Perform inherited finalization. */ + KAutoObjectWithSlabHeapAndContainer::Finalize(); } Result KProcess::Initialize(const ams::svc::CreateProcessParameter ¶ms) { @@ -106,7 +222,7 @@ namespace ams::kern { Result KProcess::Initialize(const ams::svc::CreateProcessParameter ¶ms, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(res_limit != nullptr); - MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == params.code_num_pages); + MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast(params.code_num_pages)); /* Set members. */ this->memory_pool = pool; @@ -153,8 +269,279 @@ namespace ams::kern { return ResultSuccess(); } + Result KProcess::Initialize(const ams::svc::CreateProcessParameter ¶ms, svc::KUserPointer user_caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(res_limit != nullptr); + + /* Set pool and resource limit. */ + this->memory_pool = pool; + this->resource_limit = res_limit; + + /* Get the memory sizes. */ + const size_t code_num_pages = params.code_num_pages; + const size_t system_resource_num_pages = params.system_resource_num_pages; + const size_t code_size = code_num_pages * PageSize; + const size_t system_resource_size = system_resource_num_pages * PageSize; + + /* Reserve memory for the system resource. */ + KScopedResourceReservation memory_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, code_size + KSystemControl::CalculateRequiredSecureMemorySize(system_resource_size, pool)); + R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Setup page table resource objects. */ + KMemoryBlockSlabManager *mem_block_manager; + KBlockInfoManager *block_info_manager; + KPageTableManager *pt_manager; + + this->system_resource_address = Null; + this->system_resource_num_pages = 0; + + if (system_resource_num_pages != 0) { + /* Allocate secure memory. */ + R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(this->system_resource_address), system_resource_size, pool)); + + /* Set the number of system resource pages. */ + MESOSPHERE_ASSERT(this->system_resource_address != Null); + this->system_resource_num_pages = system_resource_num_pages; + + /* Initialize managers. */ + const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(system_resource_size), PageSize); + this->dynamic_page_manager.Initialize(this->system_resource_address + rc_size, system_resource_size - rc_size); + this->page_table_manager.Initialize(std::addressof(this->dynamic_page_manager), GetPointer(this->system_resource_address)); + this->memory_block_slab_manager.Initialize(std::addressof(this->dynamic_page_manager)); + this->block_info_manager.Initialize(std::addressof(this->dynamic_page_manager)); + + mem_block_manager = std::addressof(this->memory_block_slab_manager); + block_info_manager = std::addressof(this->block_info_manager); + pt_manager = std::addressof(this->page_table_manager); + } else { + const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication); + mem_block_manager = std::addressof(is_app ? 
Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager()); + block_info_manager = std::addressof(Kernel::GetBlockInfoManager()); + pt_manager = std::addressof(Kernel::GetPageTableManager()); + } + + /* Ensure we don't leak any secure memory we allocated. */ + auto sys_resource_guard = SCOPE_GUARD { + if (this->system_resource_address != Null) { + /* Check that we have no outstanding allocations. */ + MESOSPHERE_ABORT_UNLESS(this->memory_block_slab_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(this->block_info_manager.GetUsed() == 0); + MESOSPHERE_ABORT_UNLESS(this->page_table_manager.GetUsed() == 0); + + /* Free the memory. */ + KSystemControl::FreeSecureMemory(this->system_resource_address, system_resource_size, pool); + + /* Clear our tracking variables. */ + this->system_resource_address = Null; + this->system_resource_num_pages = 0; + } + }; + + /* Setup page table. */ + /* NOTE: Nintendo passes process ID despite not having set it yet. */ + /* This goes completely unused, but even so... */ + { + const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask); + const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr); + R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager)); + } + auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); }; + + /* Ensure we can insert the code region. */ + R_UNLESS(this->page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion()); + + /* Map the code region. */ + R_TRY(this->page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped))); + + /* Initialize capabilities. */ + R_TRY(this->capabilities.Initialize(user_caps, num_caps, std::addressof(this->page_table))); + + /* Initialize the process id. */ + this->process_id = g_process_id++; + MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= this->process_id); + MESOSPHERE_ABORT_UNLESS(this->process_id <= ProcessIdMax); + + /* If we should optimize memory allocations, do so. */ + if (this->system_resource_address != Null && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) { + R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(this->process_id, pool)); + } + + /* Initialize the rest of the process. */ + R_TRY(this->Initialize(params)); + + /* Open a reference to the resource limit. */ + this->resource_limit->Open(); + + /* We succeeded, so commit our memory reservation and cancel our guards. */ + sys_resource_guard.Cancel(); + pt_guard.Cancel(); + memory_reservation.Commit(); + + return ResultSuccess(); + } + void KProcess::DoWorkerTask() { - MESOSPHERE_UNIMPLEMENTED(); + /* Terminate child threads. */ + TerminateChildren(this, nullptr); + + /* Call the debug callback. */ + KDebug::OnExitProcess(this); + + /* Finish termination. */ + this->FinishTermination(); + } + + void KProcess::StartTermination() { + /* Terminate child threads other than the current one. */ + TerminateChildren(this, GetCurrentThreadPointer()); + + /* Finalize the handle table. */ + this->handle_table.Finalize(); + } + + void KProcess::FinishTermination() { + /* Release resource limit hint. 
*/ + if (this->resource_limit != nullptr) { + this->memory_release_hint = this->GetUsedUserPhysicalMemorySize(); + this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, this->memory_release_hint); + } + + /* Change state. */ + { + KScopedSchedulerLock sl; + this->ChangeState(State_Terminated); + } + + /* Close. */ + this->Close(); + } + + void KProcess::Exit() { + MESOSPHERE_ASSERT_THIS(); + + /* Determine whether we need to start terminating */ + bool needs_terminate = false; + { + KScopedLightLock lk(this->state_lock); + KScopedSchedulerLock sl; + + MESOSPHERE_ASSERT(this->state != State_Created); + MESOSPHERE_ASSERT(this->state != State_CreatedAttached); + MESOSPHERE_ASSERT(this->state != State_Crashed); + MESOSPHERE_ASSERT(this->state != State_Terminated); + if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_DebugBreak) { + this->ChangeState(State_Terminating); + needs_terminate = true; + } + } + + /* If we need to start termination, do so. */ + if (needs_terminate) { + this->StartTermination(); + + /* Note for debug that we're exiting the process. */ + MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", this->process_id, this->name); + + /* Register the process as a work task. */ + KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this); + } + + /* Exit the current thread. */ + GetCurrentThread().Exit(); + MESOSPHERE_PANIC("Thread survived call to exit"); + } + + Result KProcess::Terminate() { + MESOSPHERE_ASSERT_THIS(); + + /* Determine whether we need to start terminating */ + bool needs_terminate = false; + { + KScopedLightLock lk(this->state_lock); + + /* Check whether we're allowed to terminate. */ + R_UNLESS(this->state != State_Created, svc::ResultInvalidState()); + R_UNLESS(this->state != State_CreatedAttached, svc::ResultInvalidState()); + + KScopedSchedulerLock sl; + + if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_Crashed || this->state == State_DebugBreak) { + this->ChangeState(State_Terminating); + needs_terminate = true; + } + } + + /* If we need to terminate, do so. */ + if (needs_terminate) { + /* Start termination. */ + this->StartTermination(); + + /* Note for debug that we're terminating the process. */ + MESOSPHERE_LOG("KProcess::Terminate() pid=%ld name=%-12s\n", this->process_id, this->name); + + /* Call the debug callback. */ + KDebug::OnTerminateProcess(this); + + /* Finish termination. */ + this->FinishTermination(); + } + + return ResultSuccess(); + } + + Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) { + /* Lock ourselves, to prevent concurrent access. */ + KScopedLightLock lk(this->state_lock); + + /* Try to find an existing info for the memory. */ + KSharedMemoryInfo *info = nullptr; + for (auto it = this->shared_memory_list.begin(); it != this->shared_memory_list.end(); ++it) { + if (it->GetSharedMemory() == shmem) { + info = std::addressof(*it); + break; + } + } + + /* If we didn't find an info, create one. */ + if (info == nullptr) { + /* Allocate a new info. */ + info = KSharedMemoryInfo::Allocate(); + R_UNLESS(info != nullptr, svc::ResultOutOfResource()); + + /* Initialize the info and add it to our list. */ + info->Initialize(shmem); + this->shared_memory_list.push_back(*info); + } + + /* Open a reference to the shared memory and its info. 
*/ + shmem->Open(); + info->Open(); + + return ResultSuccess(); + } + + void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) { + /* Lock ourselves, to prevent concurrent access. */ + KScopedLightLock lk(this->state_lock); + + /* Find an existing info for the memory. */ + KSharedMemoryInfo *info = nullptr; + auto it = this->shared_memory_list.begin(); + for (/* ... */; it != this->shared_memory_list.end(); ++it) { + if (it->GetSharedMemory() == shmem) { + info = std::addressof(*it); + break; + } + } + MESOSPHERE_ABORT_UNLESS(info != nullptr); + + /* Close a reference to the info and its memory. */ + if (info->Close()) { + this->shared_memory_list.erase(it); + KSharedMemoryInfo::Free(info); + } + + shmem->Close(); } Result KProcess::CreateThreadLocalRegion(KProcessAddress *out) { @@ -208,6 +595,54 @@ namespace ams::kern { return ResultSuccess(); } + Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) { + KThreadLocalPage *page_to_free = nullptr; + + /* Release the region. */ + { + KScopedSchedulerLock sl; + + /* Try to find the page in the partially used list. */ + auto it = this->partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); + if (it == this->partially_used_tlp_tree.end()) { + /* If we don't find it, it has to be in the fully used list. */ + it = this->fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); + R_UNLESS(it != this->fully_used_tlp_tree.end(), svc::ResultInvalidAddress()); + + /* Release the region. */ + it->Release(addr); + + /* Move the page out of the fully used list. */ + KThreadLocalPage *tlp = std::addressof(*it); + this->fully_used_tlp_tree.erase(it); + if (tlp->IsAllFree()) { + page_to_free = tlp; + } else { + this->partially_used_tlp_tree.insert(*tlp); + } + } else { + /* Release the region. */ + it->Release(addr); + + /* Handle the all-free case. */ + KThreadLocalPage *tlp = std::addressof(*it); + if (tlp->IsAllFree()) { + this->partially_used_tlp_tree.erase(it); + page_to_free = tlp; + } + } + } + + /* If we should free the page it was in, do so. */ + if (page_to_free != nullptr) { + page_to_free->Finalize(); + + KThreadLocalPage::Free(page_to_free); + } + + return ResultSuccess(); + } + void *KProcess::GetThreadLocalRegionPointer(KProcessAddress addr) { KThreadLocalPage *tlp = nullptr; { @@ -264,7 +699,76 @@ namespace ams::kern { MESOSPHERE_ASSERT(this->num_threads > 0); if (const auto count = --this->num_threads; count == 0) { - MESOSPHERE_TODO("this->Terminate();"); + this->Terminate(); + } + } + + bool KProcess::EnterUserException() { + /* Get the current thread. */ + KThread *cur_thread = GetCurrentThreadPointer(); + MESOSPHERE_ASSERT(this == cur_thread->GetOwnerProcess()); + + /* Try to claim the exception thread. */ + if (this->exception_thread != cur_thread) { + const uintptr_t address_key = reinterpret_cast(std::addressof(this->exception_thread)); + while (true) { + { + KScopedSchedulerLock sl; + + /* If the thread is terminating, it can't enter. */ + if (cur_thread->IsTerminationRequested()) { + return false; + } + + /* If we have no exception thread, we succeeded. */ + if (this->exception_thread == nullptr) { + this->exception_thread = cur_thread; + return true; + } + + /* Otherwise, wait for us to not have an exception thread. 
*/ + cur_thread->SetAddressKey(address_key); + this->exception_thread->AddWaiter(cur_thread); + if (cur_thread->GetState() == KThread::ThreadState_Runnable) { + cur_thread->SetState(KThread::ThreadState_Waiting); + } + } + /* Remove the thread as a waiter from the lock owner. */ + { + KScopedSchedulerLock sl; + KThread *owner_thread = cur_thread->GetLockOwner(); + if (owner_thread != nullptr) { + owner_thread->RemoveWaiter(cur_thread); + } + } + } + } else { + return false; + } + } + + bool KProcess::LeaveUserException() { + return this->ReleaseUserException(GetCurrentThreadPointer()); + } + + bool KProcess::ReleaseUserException(KThread *thread) { + KScopedSchedulerLock sl; + + if (this->exception_thread == thread) { + this->exception_thread = nullptr; + + /* Remove waiter thread. */ + s32 num_waiters; + KThread *next = thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast(std::addressof(this->exception_thread))); + if (next != nullptr) { + if (next->GetState() == KThread::ThreadState_Waiting) { + next->SetState(KThread::ThreadState_Runnable); + } + } + + return true; + } else { + return false; } } @@ -280,11 +784,53 @@ namespace ams::kern { this->thread_list.erase(this->thread_list.iterator_to(*thread)); } + size_t KProcess::GetUsedUserPhysicalMemorySize() const { + const size_t norm_size = this->page_table.GetNormalMemorySize(); + const size_t other_size = this->code_size + this->main_thread_stack_size; + const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool); + + return norm_size + other_size + sec_size; + } + + size_t KProcess::GetTotalUserPhysicalMemorySize() const { + /* Get the amount of free and used size. */ + const size_t free_size = this->resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax); + const size_t used_size = this->GetUsedNonSystemUserPhysicalMemorySize(); + const size_t max_size = this->max_process_memory; + + if (used_size + free_size > max_size) { + return max_size; + } else { + return free_size + used_size; + } + } + + size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const { + const size_t norm_size = this->page_table.GetNormalMemorySize(); + const size_t other_size = this->code_size + this->main_thread_stack_size; + + return norm_size + other_size; + } + + size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const { + /* Get the amount of free and used size. */ + const size_t free_size = this->resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax); + const size_t used_size = this->GetUsedUserPhysicalMemorySize(); + const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool); + const size_t max_size = this->max_process_memory; + + if (used_size + free_size > max_size) { + return max_size - sec_size; + } else { + return free_size + used_size - sec_size; + } + } + Result KProcess::Run(s32 priority, size_t stack_size) { MESOSPHERE_ASSERT_THIS(); /* Lock ourselves, to prevent concurrent access. */ - KScopedLightLock lk(this->lock); + KScopedLightLock lk(this->state_lock); /* Validate that we're in a state where we can initialize. */ const auto state = this->state; @@ -303,7 +849,7 @@ namespace ams::kern { R_UNLESS(stack_size + this->code_size >= this->code_size, svc::ResultOutOfMemory()); /* Place a tentative reservation of memory for our new stack. 
*/ - KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax); + KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, stack_size); R_UNLESS(mem_reservation.Succeeded(), svc::ResultLimitReached()); /* Allocate and map our stack. */ @@ -370,8 +916,287 @@ namespace ams::kern { return ResultSuccess(); } - void KProcess::SetPreemptionState() { - MESOSPHERE_UNIMPLEMENTED(); + Result KProcess::Reset() { + MESOSPHERE_ASSERT_THIS(); + + /* Lock the process and the scheduler. */ + KScopedLightLock lk(this->state_lock); + KScopedSchedulerLock sl; + + /* Validate that we're in a state that we can reset. */ + R_UNLESS(this->state != State_Terminated, svc::ResultInvalidState()); + R_UNLESS(this->is_signaled, svc::ResultInvalidState()); + + /* Clear signaled. */ + this->is_signaled = false; + return ResultSuccess(); + } + + Result KProcess::SetActivity(ams::svc::ProcessActivity activity) { + /* Lock ourselves and the scheduler. */ + KScopedLightLock lk(this->state_lock); + KScopedLightLock list_lk(this->list_lock); + KScopedSchedulerLock sl; + + /* Validate our state. */ + R_UNLESS(this->state != State_Terminating, svc::ResultInvalidState()); + R_UNLESS(this->state != State_Terminated, svc::ResultInvalidState()); + + /* Either pause or resume. */ + if (activity == ams::svc::ProcessActivity_Paused) { + /* Verify that we're not suspended. */ + R_UNLESS(!this->is_suspended, svc::ResultInvalidState()); + + /* Suspend all threads. */ + auto end = this->GetThreadList().end(); + for (auto it = this->GetThreadList().begin(); it != end; ++it) { + it->RequestSuspend(KThread::SuspendType_Process); + } + + /* Set ourselves as suspended. */ + this->SetSuspended(true); + } else { + MESOSPHERE_ASSERT(activity == ams::svc::ProcessActivity_Runnable); + + /* Verify that we're suspended. */ + R_UNLESS(this->is_suspended, svc::ResultInvalidState()); + + /* Resume all threads. */ + auto end = this->GetThreadList().end(); + for (auto it = this->GetThreadList().begin(); it != end; ++it) { + it->Resume(KThread::SuspendType_Process); + } + + /* Set ourselves as resumed. */ + this->SetSuspended(false); + } + + return ResultSuccess(); + } + + Result KProcess::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer out_thread_ids, s32 max_out_count) { + /* Lock the list. */ + KScopedLightLock lk(this->list_lock); + + /* Iterate over the list. */ + s32 count = 0; + auto end = this->GetThreadList().end(); + for (auto it = this->GetThreadList().begin(); it != end; ++it) { + /* If we're within array bounds, write the id. */ + if (count < max_out_count) { + /* Get the thread id. */ + KThread *thread = std::addressof(*it); + const u64 id = thread->GetId(); + + /* Copy the id to userland. */ + R_TRY(out_thread_ids.CopyArrayElementFrom(std::addressof(id), count)); + } + + /* Increment the count. */ + ++count; + } + + /* We successfully iterated the list. */ + *out_num_threads = count; + return ResultSuccess(); + } + + KProcess::State KProcess::SetDebugObject(void *debug_object) { + /* Attaching should only happen to non-null objects while the scheduler is locked. */ + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + MESOSPHERE_ASSERT(debug_object != nullptr); + + /* Cache our state to return it to the debug object. */ + const auto old_state = this->state; + + /* Set the object. */ + this->attached_object = debug_object; + + /* Check that our state is valid for attach. 
*/ + MESOSPHERE_ASSERT(this->state == State_Created || this->state == State_Running || this->state == State_Crashed); + + /* Update our state. */ + if (this->state != State_DebugBreak) { + if (this->state == State_Created) { + this->ChangeState(State_CreatedAttached); + } else { + this->ChangeState(State_DebugBreak); + } + } + + return old_state; + } + + void KProcess::ClearDebugObject(KProcess::State old_state) { + /* Detaching from process should only happen while the scheduler is locked. */ + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Clear the attached object. */ + this->attached_object = nullptr; + + /* Validate that the process is in an attached state. */ + MESOSPHERE_ASSERT(this->state == State_CreatedAttached || this->state == State_RunningAttached || this->state == State_DebugBreak || this->state == State_Terminating || this->state == State_Terminated); + + /* Change the state appropriately. */ + if (this->state == State_CreatedAttached) { + this->ChangeState(State_Created); + } else if (this->state == State_RunningAttached || this->state == State_DebugBreak) { + /* Disallow transition back to created from running. */ + if (old_state == State_Created) { + old_state = State_Running; + } + this->ChangeState(old_state); + } + } + + bool KProcess::EnterJitDebug(ams::svc::DebugEvent event, ams::svc::DebugException exception, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) { + /* Check that we're the current process. */ + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this == GetCurrentProcessPointer()); + + /* If we aren't allowed to enter jit debug, don't. */ + if ((this->flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) { + return false; + } + + /* We're the current process, so we should be some kind of running. */ + MESOSPHERE_ASSERT(this->state != State_Created); + MESOSPHERE_ASSERT(this->state != State_CreatedAttached); + MESOSPHERE_ASSERT(this->state != State_Terminated); + + /* Try to enter JIT debug. */ + while (true) { + /* Lock ourselves and the scheduler. */ + KScopedLightLock lk(this->state_lock); + KScopedLightLock list_lk(this->list_lock); + KScopedSchedulerLock sl; + + /* If we're attached to a debugger, we're necessarily in debug. */ + if (this->IsAttachedToDebugger()) { + return true; + } + + /* If the current thread is terminating, we can't enter debug. */ + if (GetCurrentThread().IsTerminationRequested()) { + return false; + } + + /* We're not attached to debugger, so check that. */ + MESOSPHERE_ASSERT(this->state != State_RunningAttached); + MESOSPHERE_ASSERT(this->state != State_DebugBreak); + + /* If we're terminating, we can't enter debug. */ + if (this->state != State_Running && this->state != State_Crashed) { + MESOSPHERE_ASSERT(this->state == State_Terminating); + return false; + } + + /* If the current thread is suspended, retry. */ + if (GetCurrentThread().IsSuspended()) { + continue; + } + + /* Suspend all our threads. */ + { + auto end = this->GetThreadList().end(); + for (auto it = this->GetThreadList().begin(); it != end; ++it) { + it->RequestSuspend(KThread::SuspendType_Debug); + } + } + + /* Change our state to crashed. */ + this->ChangeState(State_Crashed); + + /* Enter jit debug. 
*/ + this->is_jit_debug = true; + this->jit_debug_event_type = event; + this->jit_debug_exception_type = exception; + this->jit_debug_params[0] = param1; + this->jit_debug_params[1] = param2; + this->jit_debug_params[2] = param3; + this->jit_debug_params[3] = param4; + this->jit_debug_thread_id = GetCurrentThread().GetId(); + + /* Exit our retry loop. */ + break; + } + + /* Check if our state indicates we're in jit debug. */ + { + KScopedSchedulerLock sl; + + if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_Crashed || this->state == State_DebugBreak) { + return true; + } + } + + return false; + } + + KEventInfo *KProcess::GetJitDebugInfo() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + if (this->is_jit_debug) { + return KDebugBase::CreateDebugEvent(this->jit_debug_event_type, this->jit_debug_exception_type, this->jit_debug_params[0], this->jit_debug_params[1], this->jit_debug_params[2], this->jit_debug_params[3], this->jit_debug_thread_id); + } else { + return nullptr; + } + } + + void KProcess::ClearJitDebugInfo() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + this->is_jit_debug = false; + } + + KProcess *KProcess::GetProcessFromId(u64 process_id) { + /* Lock the list. */ + KProcess::ListAccessor accessor; + const auto end = accessor.end(); + + /* Iterate over the list. */ + for (auto it = accessor.begin(); it != end; ++it) { + /* Get the process. */ + KProcess *process = static_cast(std::addressof(*it)); + + if (process->GetId() == process_id) { + if (AMS_LIKELY(process->Open())) { + return process; + } + } + } + + /* We failed to find the process. */ + return nullptr; + } + + Result KProcess::GetProcessList(s32 *out_num_processes, ams::kern::svc::KUserPointer out_process_ids, s32 max_out_count) { + /* Lock the list. */ + KProcess::ListAccessor accessor; + const auto end = accessor.end(); + + /* Iterate over the list. */ + s32 count = 0; + for (auto it = accessor.begin(); it != end; ++it) { + /* If we're within array bounds, write the id. */ + if (count < max_out_count) { + /* Get the process id. */ + KProcess *process = static_cast(std::addressof(*it)); + const u64 id = process->GetId(); + + /* Copy the id to userland. */ + R_TRY(out_process_ids.CopyArrayElementFrom(std::addressof(id), count)); + } + + /* Increment the count. */ + ++count; + } + + /* We successfully iterated the list. */ + *out_num_processes = count; + return ResultSuccess(); } } diff --git a/libraries/libmesosphere/source/kern_k_scheduler.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp index 97dcb1165..ee1869557 100644 --- a/libraries/libmesosphere/source/kern_k_scheduler.cpp +++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp @@ -90,7 +90,9 @@ namespace ams::kern { } if (this->state.should_count_idle) { if (AMS_LIKELY(highest_thread != nullptr)) { - /* TODO: Set parent process's idle count if it exists. */ + if (KProcess *process = highest_thread->GetOwnerProcess(); process != nullptr) { + process->SetRunningThread(this->core_id, highest_thread, this->state.idle_count); + } } else { this->state.idle_count++; } @@ -118,13 +120,13 @@ namespace ams::kern { for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) { KThread *top_thread = priority_queue.GetScheduledFront(core_id); if (top_thread != nullptr) { - /* If the thread has no waiters, we need to check if the process has a thread pinned by PreemptionState. 
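GetProcessFromId above walks the global process list and only returns a match whose reference could be taken via Open(), so the caller always owns a reference and must close it when done. A simplified, self-contained sketch of that lookup pattern (the RefCounted type here is hypothetical):

    #include <cstdint>
    #include <list>

    struct RefCounted {
        uint64_t id;
        bool alive = true;
        uint64_t GetId() const { return id; }
        bool Open() { return alive; }   /* in the kernel this takes a reference and can fail during teardown */
    };

    RefCounted *FindAndOpen(std::list<RefCounted *> &objects, uint64_t process_id) {
        for (RefCounted *obj : objects) {
            if (obj->GetId() == process_id && obj->Open()) {
                return obj;   /* caller owns a reference and must release it */
            }
        }
        return nullptr;
    }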
*/ + /* If the thread has no waiters, we need to check if the process has a thread pinned. */ if (top_thread->GetNumKernelWaiters() == 0) { if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) { - if (KThread *suggested = parent->GetPreemptionStatePinnedThread(core_id); suggested != nullptr && suggested != top_thread) { - /* We prefer our parent's pinned thread possible. However, we also don't want to schedule un-runnable threads. */ - if (suggested->GetRawState() == KThread::ThreadState_Runnable) { - top_thread = suggested; + if (KThread *pinned = parent->GetPinnedThread(core_id); pinned != nullptr && pinned != top_thread) { + /* We prefer our parent's pinned thread if possible. However, we also don't want to schedule un-runnable threads. */ + if (pinned->GetRawState() == KThread::ThreadState_Runnable) { + top_thread = pinned; } else { top_thread = nullptr; } @@ -199,7 +201,7 @@ namespace ams::kern { return cores_needing_scheduling; } - void KScheduler::SetInterruptTaskThreadRunnable() { + void KScheduler::InterruptTaskThreadToRunnable() { MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1); KThread *task_thread = Kernel::GetInterruptTaskManager().GetThread(); @@ -232,7 +234,7 @@ namespace ams::kern { const s64 prev_tick = this->last_context_switch_time; const s64 cur_tick = KHardwareTimer::GetTick(); const s64 tick_diff = cur_tick - prev_tick; - cur_thread->AddCpuTime(tick_diff); + cur_thread->AddCpuTime(this->core_id, tick_diff); if (cur_process != nullptr) { cur_process->AddCpuTime(tick_diff); } @@ -252,6 +254,7 @@ namespace ams::kern { /* Switch the current process, if we're switching processes. */ if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) { + /* MESOSPHERE_LOG("!!! PROCESS SWITCH !!! %s -> %s\n", cur_process != nullptr ? cur_process->GetName() : nullptr, next_process != nullptr ? next_process->GetName() : nullptr); */ KProcess::Switch(cur_process, next_process); } @@ -260,6 +263,47 @@ namespace ams::kern { /* Set the new Thread Local region. */ cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress())); + SetCurrentThreadLocalRegion(next_thread->GetThreadLocalRegionHeapAddress()); + } + + void KScheduler::ClearPreviousThread(KThread *thread) { + MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread()); + for (size_t i = 0; i < cpu::NumCores; ++i) { + std::atomic *prev_thread_ptr = reinterpret_cast *>(std::addressof(Kernel::GetScheduler(static_cast(i)).prev_thread)); + static_assert(sizeof(*prev_thread_ptr) == sizeof(KThread *)); + + prev_thread_ptr->compare_exchange_weak(thread, nullptr); + } + } + + void KScheduler::PinCurrentThread(KProcess *cur_process) { + MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread()); + + /* Get the current thread. */ + const s32 core_id = GetCurrentCoreId(); + KThread *cur_thread = GetCurrentThreadPointer(); + + /* Pin it. */ + cur_process->PinThread(core_id, cur_thread); + cur_thread->Pin(); + + /* An update is needed. */ + SetSchedulerUpdateNeeded(); + } + + void KScheduler::UnpinCurrentThread(KProcess *cur_process) { + MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread()); + + /* Get the current thread. */ + const s32 core_id = GetCurrentCoreId(); + KThread *cur_thread = GetCurrentThreadPointer(); + + /* Unpin it. */ + cur_thread->Unpin(); + cur_process->UnpinThread(core_id, cur_thread); + + /* An update is needed. 
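ClearPreviousThread above removes a dying thread from every per-core prev_thread slot: the compare-and-swap only clears a slot that still holds exactly that thread, so a newer value written by another core is never overwritten. A standalone sketch of the same idea (compare_exchange_strong is used here for clarity, and the core count and globals are assumptions for illustration):

    #include <atomic>
    #include <cstddef>

    constexpr size_t NumCores = 4;     /* illustrative; the real value comes from cpu::NumCores */
    struct Thread;
    std::atomic<Thread *> g_prev_thread[NumCores];

    void ClearPreviousThread(Thread *thread) {
        for (size_t i = 0; i < NumCores; ++i) {
            Thread *expected = thread;
            g_prev_thread[i].compare_exchange_strong(expected, nullptr);
        }
    }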
*/ + SetSchedulerUpdateNeeded(); } void KScheduler::OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state) { @@ -391,5 +435,187 @@ namespace ams::kern { SetSchedulerUpdateNeeded(); } + void KScheduler::YieldWithoutCoreMigration() { + /* Validate preconditions. */ + MESOSPHERE_ASSERT(CanSchedule()); + MESOSPHERE_ASSERT(GetCurrentProcessPointer() != nullptr); + + /* Get the current thread and process. */ + KThread &cur_thread = GetCurrentThread(); + KProcess &cur_process = GetCurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto &priority_queue = GetPriorityQueue(); + + /* Perform the yield. */ + { + KScopedSchedulerLock sl; + + const auto cur_state = cur_thread.GetRawState(); + if (cur_state == KThread::ThreadState_Runnable) { + /* Put the current thread at the back of the queue. */ + KThread *next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* If the next thread is different, we have an update to perform. */ + if (next_thread != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(); + } else { + /* Otherwise, set the thread's yield count so that we won't waste work until the process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } + } + } + + void KScheduler::YieldWithCoreMigration() { + /* Validate preconditions. */ + MESOSPHERE_ASSERT(CanSchedule()); + MESOSPHERE_ASSERT(GetCurrentProcessPointer() != nullptr); + + /* Get the current thread and process. */ + KThread &cur_thread = GetCurrentThread(); + KProcess &cur_process = GetCurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto &priority_queue = GetPriorityQueue(); + + /* Perform the yield. */ + { + KScopedSchedulerLock sl; + + const auto cur_state = cur_thread.GetRawState(); + if (cur_state == KThread::ThreadState_Runnable) { + /* Get the current active core. */ + const s32 core_id = cur_thread.GetActiveCore(); + + /* Put the current thread at the back of the queue. */ + KThread *next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* While we have a suggested thread, try to migrate it! */ + bool recheck = false; + KThread *suggested = priority_queue.GetSuggestedFront(core_id); + while (suggested != nullptr) { + /* Check if the suggested thread is the thread running on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + + if (KThread *running_on_suggested_core = (suggested_core >= 0) ? Kernel::GetScheduler(suggested_core).state.highest_priority_thread : nullptr; running_on_suggested_core != suggested) { + /* If the current thread's priority is higher than our suggestion's we prefer the next thread to the suggestion. */ + /* We also prefer the next thread when the current thread's priority is equal to the suggestions, but the next thread has been waiting longer. 
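This yield path and the two below begin with the same early-out: if the thread's recorded yield count matches the process's scheduled count, a previous yield already established that there is nothing to do, so the call returns immediately. A simplified sketch of that bookkeeping:

    #include <cstdint>

    struct Process { uint64_t scheduled_count = 0; };                 /* bumped when the process is scheduled    */
    struct Thread  { uint64_t yield_schedule_count = UINT64_MAX; };   /* sentinel: no recorded useless yield yet */

    bool ShouldSkipYield(const Thread &t, const Process &p) {
        return t.yield_schedule_count == p.scheduled_count;
    }

    /* Called when a yield changed nothing: remember the current scheduled count so further */
    /* yields are skipped until the process is actually scheduled again.                    */
    void RecordUselessYield(Thread &t, const Process &p) {
        t.yield_schedule_count = p.scheduled_count;
    }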
*/ + if ((suggested->GetPriority() > cur_thread.GetPriority()) || + (suggested->GetPriority() == cur_thread.GetPriority() && next_thread != std::addressof(cur_thread) && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick())) + { + suggested = nullptr; + break; + } + + /* If we're allowed to do a migration, do one. */ + /* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */ + if (running_on_suggested_core == nullptr || running_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested, true); + IncrementScheduledCount(suggested); + break; + } else { + /* We couldn't perform a migration, but we should check again on a future yield. */ + recheck = true; + } + } + + /* Get the next suggestion. */ + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + + + /* If we still have a suggestion or the next thread is different, we have an update to perform. */ + if (suggested != nullptr || next_thread != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(); + } else if (!recheck) { + /* Otherwise if we don't need to re-check, set the thread's yield count so that we won't waste work until the process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } + } + } + + void KScheduler::YieldToAnyThread() { + /* Validate preconditions. */ + MESOSPHERE_ASSERT(CanSchedule()); + MESOSPHERE_ASSERT(GetCurrentProcessPointer() != nullptr); + + /* Get the current thread and process. */ + KThread &cur_thread = GetCurrentThread(); + KProcess &cur_process = GetCurrentProcess(); + + /* If the thread's yield count matches, there's nothing for us to do. */ + if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) { + return; + } + + /* Get a reference to the priority queue. */ + auto &priority_queue = GetPriorityQueue(); + + /* Perform the yield. */ + { + KScopedSchedulerLock sl; + + const auto cur_state = cur_thread.GetRawState(); + if (cur_state == KThread::ThreadState_Runnable) { + /* Get the current active core. */ + const s32 core_id = cur_thread.GetActiveCore(); + + /* Migrate the current thread to core -1. */ + cur_thread.SetActiveCore(-1); + priority_queue.ChangeCore(core_id, std::addressof(cur_thread)); + IncrementScheduledCount(std::addressof(cur_thread)); + + /* If there's nothing scheduled, we can try to perform a migration. */ + if (priority_queue.GetScheduledFront(core_id) == nullptr) { + /* While we have a suggested thread, try to migrate it! */ + KThread *suggested = priority_queue.GetSuggestedFront(core_id); + while (suggested != nullptr) { + /* Check if the suggested thread is the top thread on its core. */ + const s32 suggested_core = suggested->GetActiveCore(); + if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) { + /* If we're allowed to do a migration, do one. */ + if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) { + suggested->SetActiveCore(core_id); + priority_queue.ChangeCore(suggested_core, suggested); + IncrementScheduledCount(suggested); + } + + /* Regardless of whether we migrated, we had a candidate, so we're done. */ + break; + } + + /* Get the next suggestion. 
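The migration checks in YieldWithCoreMigration and YieldToAnyThread reduce to the same admissibility test: a suggested thread may be pulled off another core only if that core is idle or the thread currently running there sits at or below the migration threshold. In isolation (the threshold value here is assumed purely for illustration; the real constant is defined by the scheduler headers):

    /* Lower numeric value means higher priority in the kernel's convention. */
    constexpr int HighestCoreMigrationAllowedPriority = 2;   /* assumed value for illustration */

    /* running_priority points at the priority of the thread currently on the suggested core, */
    /* or is nullptr if that core has nothing scheduled.                                       */
    bool CanMigrateFromCore(const int *running_priority) {
        return running_priority == nullptr || *running_priority >= HighestCoreMigrationAllowedPriority;
    }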
*/ + suggested = priority_queue.GetSuggestedNext(core_id, suggested); + } + + /* If the suggestion is different from the current thread, we need to perform an update. */ + if (suggested != std::addressof(cur_thread)) { + SetSchedulerUpdateNeeded(); + } else { + /* Otherwise, set the thread's yield count so that we won't waste work until the process is scheduled again. */ + cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount()); + } + } else { + /* Otherwise, we have an update to perform. */ + SetSchedulerUpdateNeeded(); + } + } + } + } } diff --git a/libraries/libmesosphere/source/kern_k_server_port.cpp b/libraries/libmesosphere/source/kern_k_server_port.cpp index 06096bc86..965f7972a 100644 --- a/libraries/libmesosphere/source/kern_k_server_port.cpp +++ b/libraries/libmesosphere/source/kern_k_server_port.cpp @@ -84,13 +84,70 @@ namespace ams::kern { } bool KServerPort::IsSignaled() const { - /* TODO: Check preconditions later. */ MESOSPHERE_ASSERT_THIS(); if (this->IsLight()) { return !this->light_session_list.empty(); } else { - return this->session_list.empty(); + return !this->session_list.empty(); } } + void KServerPort::EnqueueSession(KServerSession *session) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(!this->IsLight()); + + KScopedSchedulerLock sl; + + /* Add the session to our queue. */ + this->session_list.push_back(*session); + if (this->session_list.size() == 1) { + this->NotifyAvailable(); + } + } + + void KServerPort::EnqueueSession(KLightServerSession *session) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->IsLight()); + + KScopedSchedulerLock sl; + + /* Add the session to our queue. */ + this->light_session_list.push_back(*session); + if (this->light_session_list.size() == 1) { + this->NotifyAvailable(); + } + } + + KServerSession *KServerPort::AcceptSession() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(!this->IsLight()); + + KScopedSchedulerLock sl; + + /* Return the first session in the list. */ + if (this->session_list.empty()) { + return nullptr; + } + + KServerSession *session = std::addressof(this->session_list.front()); + this->session_list.pop_front(); + return session; + } + + KLightServerSession *KServerPort::AcceptLightSession() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->IsLight()); + + KScopedSchedulerLock sl; + + /* Return the first session in the list. */ + if (this->light_session_list.empty()) { + return nullptr; + } + + KLightServerSession *session = std::addressof(this->light_session_list.front()); + this->light_session_list.pop_front(); + return session; + } + } diff --git a/libraries/libmesosphere/source/kern_k_server_session.cpp b/libraries/libmesosphere/source/kern_k_server_session.cpp new file mode 100644 index 000000000..36159014c --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_server_session.cpp @@ -0,0 +1,1355 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
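The KServerPort changes above fix IsSignaled (a port is signaled when its pending-session list is non-empty, not empty) and add the enqueue/accept pair. A simplified sketch of the queue behaviour, with locking omitted and hypothetical types:

    #include <deque>

    struct Session;

    struct Port {
        std::deque<Session *> sessions;

        /* Signaled when at least one session is pending (the corrected behaviour above). */
        bool IsSignaled() const { return !sessions.empty(); }

        void Enqueue(Session *s) {
            sessions.push_back(s);
            if (sessions.size() == 1) {
                /* NotifyAvailable(): waiters are only woken on the empty -> non-empty transition. */
            }
        }

        Session *Accept() {
            if (sessions.empty()) {
                return nullptr;
            }
            Session *s = sessions.front();
            sessions.pop_front();
            return s;
        }
    };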
+ */ +#include + +namespace ams::kern { + + namespace ipc { + + using MessageBuffer = ams::svc::ipc::MessageBuffer; + + } + + namespace { + + constexpr inline size_t PointerTransferBufferAlignment = 0x10; + + class ReceiveList { + private: + u32 data[ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountMax * ipc::MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32)]; + s32 recv_list_count; + uintptr_t msg_buffer_end; + uintptr_t msg_buffer_space_end; + public: + static constexpr int GetEntryCount(const ipc::MessageBuffer::MessageHeader &header) { + const auto count = header.GetReceiveListCount(); + switch (count) { + case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_None: + return 0; + case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer: + return 0; + case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer: + return 1; + default: + return count - ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset; + } + } + public: + ReceiveList(const u32 *dst_msg, uintptr_t dst_address, const KProcessPageTable &dst_page_table, const ipc::MessageBuffer::MessageHeader &dst_header, const ipc::MessageBuffer::SpecialHeader &dst_special_header, size_t msg_size, size_t out_offset, s32 dst_recv_list_idx, bool is_tls) { + this->recv_list_count = dst_header.GetReceiveListCount(); + this->msg_buffer_end = dst_address + sizeof(u32) * out_offset; + this->msg_buffer_space_end = dst_address + msg_size; + + const u32 *recv_list = dst_msg + dst_recv_list_idx; + const auto entry_count = GetEntryCount(dst_header); + + if (is_tls) { + __builtin_memcpy(this->data, recv_list, entry_count * ipc::MessageBuffer::ReceiveListEntry::GetDataSize()); + } else { + uintptr_t page_addr = util::AlignDown(dst_address, PageSize); + uintptr_t cur_addr = dst_address + dst_recv_list_idx * sizeof(u32); + for (size_t i = 0; i < entry_count * ipc::MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32); ++i) { + if (page_addr != util::AlignDown(cur_addr, PageSize)) { + KPhysicalAddress phys_addr; + dst_page_table.GetPhysicalAddress(std::addressof(phys_addr), KProcessAddress(cur_addr)); + + recv_list = GetPointer(KPageTable::GetHeapVirtualAddress(phys_addr)); + page_addr = util::AlignDown(cur_addr, PageSize); + } + this->data[i] = *(recv_list++); + cur_addr += sizeof(u32); + } + } + } + + constexpr bool IsIndex() const { + return this->recv_list_count > ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset; + } + + void GetBuffer(uintptr_t &out, size_t size, int &key) const { + switch (this->recv_list_count) { + case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_None: + { + out = 0; + } + break; + case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer: + { + const uintptr_t buf = util::AlignUp(this->msg_buffer_end + key, PointerTransferBufferAlignment); + + if ((buf < buf + size) && (buf + size <= this->msg_buffer_space_end)) { + out = buf; + key = buf + size - this->msg_buffer_end; + } else { + out = 0; + } + } + break; + case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer: + { + const ipc::MessageBuffer::ReceiveListEntry entry(this->data[0], this->data[1]); + const uintptr_t buf = util::AlignUp(entry.GetAddress() + key, PointerTransferBufferAlignment); + + const uintptr_t entry_addr = entry.GetAddress(); + const size_t entry_size = entry.GetSize(); + + if ((buf < buf + size) && (entry_addr < entry_addr + entry_size) && (buf + size <= entry_addr + entry_size)) { + out = buf; + key = 
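For the ReceiveListCountType_ToMessageBuffer case above, pointer data is packed after the end of the received message, each buffer aligned to the pointer-transfer alignment, with the key carrying the running offset between descriptors. The placement arithmetic as a standalone sketch:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t PointerTransferBufferAlignment = 0x10;

    constexpr uintptr_t AlignUp(uintptr_t value, size_t align) {
        return (value + align - 1) & ~static_cast<uintptr_t>(align - 1);
    }

    /* Returns the placement for a pointer buffer of `size` bytes, or 0 if it does not fit before  */
    /* space_end. On success, key is advanced so the next buffer is placed after this one.         */
    uintptr_t PlaceInMessageBuffer(size_t size, int &key, uintptr_t msg_end, uintptr_t space_end) {
        const uintptr_t buf = AlignUp(msg_end + key, PointerTransferBufferAlignment);
        if (buf < buf + size && buf + size <= space_end) {   /* overflow check plus bounds check */
            key = static_cast<int>(buf + size - msg_end);
            return buf;
        }
        return 0;
    }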
buf + size - entry_addr; + } else { + out = 0; + } + } + break; + default: + { + if (key < this->recv_list_count - ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset) { + const ipc::MessageBuffer::ReceiveListEntry entry(this->data[2 * key + 0], this->data[2 * key + 1]); + + const uintptr_t entry_addr = entry.GetAddress(); + const size_t entry_size = entry.GetSize(); + + if ((entry_addr < entry_addr + entry_size) && (entry_size >= size)) { + out = entry_addr; + } + } else { + out = 0; + } + } + break; + } + } + }; + + template + ALWAYS_INLINE Result ProcessMessageSpecialData(int &offset, KProcess &dst_process, KProcess &src_process, KThread &src_thread, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ipc::MessageBuffer::SpecialHeader &src_special_header) { + /* Copy the special header to the destination. */ + offset = dst_msg.Set(src_special_header); + + /* Copy the process ID. */ + if (src_special_header.GetHasProcessId()) { + /* NOTE: Atmosphere extends the official kernel here to enable the mitm api. */ + /* If building the kernel without this support, just set the following to false. */ + constexpr bool EnableProcessIdPassthroughForAtmosphere = true; + + if constexpr (EnableProcessIdPassthroughForAtmosphere) { + constexpr u64 PassthroughProcessIdMask = UINT64_C(0xFFFF000000000000); + constexpr u64 PassthroughProcessIdValue = UINT64_C(0xFFFE000000000000); + static_assert((PassthroughProcessIdMask & PassthroughProcessIdValue) == PassthroughProcessIdValue); + + const u64 src_process_id_value = src_msg.GetProcessId(offset); + const bool is_passthrough = (src_process_id_value & PassthroughProcessIdMask) == PassthroughProcessIdValue; + + offset = dst_msg.SetProcessId(offset, is_passthrough ? (src_process_id_value & ~PassthroughProcessIdMask) : src_process.GetId()); + } else { + offset = dst_msg.SetProcessId(offset, src_process.GetId()); + } + } + + /* Prepare to process handles. */ + auto &dst_handle_table = dst_process.GetHandleTable(); + auto &src_handle_table = src_process.GetHandleTable(); + Result result = ResultSuccess(); + + /* Process copy handles. */ + for (auto i = 0; i < src_special_header.GetCopyHandleCount(); ++i) { + /* Get the handles. */ + const ams::svc::Handle src_handle = src_msg.GetHandle(offset); + ams::svc::Handle dst_handle = ams::svc::InvalidHandle; + + /* If we're in a success state, try to move the handle to the new table. */ + if (R_SUCCEEDED(result) && src_handle != ams::svc::InvalidHandle) { + KScopedAutoObject obj = src_handle_table.GetObjectForIpc(src_handle, std::addressof(src_thread)); + if (obj.IsNotNull()) { + Result add_result = dst_handle_table.Add(std::addressof(dst_handle), obj.GetPointerUnsafe()); + if (R_FAILED(add_result)) { + result = add_result; + dst_handle = ams::svc::InvalidHandle; + } + } else { + result = svc::ResultInvalidHandle(); + } + } + + /* Set the handle. */ + offset = dst_msg.SetHandle(offset, dst_handle); + } + + /* Process move handles. */ + if constexpr (MoveHandleAllowed) { + for (auto i = 0; i < src_special_header.GetMoveHandleCount(); ++i) { + /* Get the handles. */ + const ams::svc::Handle src_handle = src_msg.GetHandle(offset); + ams::svc::Handle dst_handle = ams::svc::InvalidHandle; + + /* Whether or not we've succeeded, we need to remove the handles from the source table. 
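The process-id passthrough above is the kernel-side hook for atmosphère's mitm api: a forwarded message may carry a value of the form 0xFFFE'xxxx'xxxx'xxxx in the process-id slot, and the low bits are forwarded verbatim instead of being replaced with the sender's real process id. In isolation, using the mask and value from the code above:

    #include <cstdint>

    constexpr uint64_t PassthroughProcessIdMask  = UINT64_C(0xFFFF000000000000);
    constexpr uint64_t PassthroughProcessIdValue = UINT64_C(0xFFFE000000000000);

    /* slot_value is what the sender wrote in the message; real_process_id is the sender's actual id. */
    uint64_t ResolveProcessId(uint64_t slot_value, uint64_t real_process_id) {
        const bool is_passthrough = (slot_value & PassthroughProcessIdMask) == PassthroughProcessIdValue;
        return is_passthrough ? (slot_value & ~PassthroughProcessIdMask) : real_process_id;
    }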
*/ + if (src_handle != ams::svc::InvalidHandle) { + if (R_SUCCEEDED(result)) { + KScopedAutoObject obj = src_handle_table.GetObjectForIpcWithoutPseudoHandle(src_handle); + if (obj.IsNotNull()) { + Result add_result = dst_handle_table.Add(std::addressof(dst_handle), obj.GetPointerUnsafe()); + + src_handle_table.Remove(src_handle); + + if (R_FAILED(add_result)) { + result = add_result; + dst_handle = ams::svc::InvalidHandle; + } + } else { + result = svc::ResultInvalidHandle(); + } + } else { + src_handle_table.Remove(src_handle); + } + } + + /* Set the handle. */ + offset = dst_msg.SetHandle(offset, dst_handle); + } + } + + return result; + } + + ALWAYS_INLINE Result ProcessReceiveMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) { + /* Get the offset at the start of processing. */ + const int cur_offset = offset; + + /* Get the pointer desc. */ + ipc::MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset); + offset += ipc::MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32); + + /* Extract address/size. */ + const uintptr_t src_pointer = src_desc.GetAddress(); + const size_t recv_size = src_desc.GetSize(); + uintptr_t recv_pointer = 0; + + /* Process the buffer, if it has a size. */ + if (recv_size > 0) { + /* If using indexing, set index. */ + if (dst_recv_list.IsIndex()) { + pointer_key = src_desc.GetIndex(); + } + + /* Get the buffer. */ + dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key); + R_UNLESS(recv_pointer != 0, svc::ResultOutOfResource()); + + /* Perform the pointer data copy. */ + if (dst_user) { + R_TRY(src_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table, recv_pointer, recv_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + static_cast(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite), + KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, + src_pointer, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + KMemoryPermission_UserRead, + KMemoryAttribute_Uncached, KMemoryAttribute_None)); + } else { + R_TRY(src_page_table.CopyMemoryFromLinearToUser(recv_pointer, recv_size, src_pointer, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + KMemoryPermission_UserRead, + KMemoryAttribute_Uncached, KMemoryAttribute_None)); + } + } + + /* Set the output descriptor. 
*/ + dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast(recv_pointer), recv_size, src_desc.GetIndex())); + + return ResultSuccess(); + } + + constexpr ALWAYS_INLINE Result GetMapAliasMemoryState(KMemoryState &out, ipc::MessageBuffer::MapAliasDescriptor::Attribute attr) { + switch (attr) { + case ipc::MessageBuffer::MapAliasDescriptor::Attribute_Ipc: out = KMemoryState_Ipc; break; + case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonSecureIpc: out = KMemoryState_NonSecureIpc; break; + case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonDeviceIpc: out = KMemoryState_NonDeviceIpc; break; + default: return svc::ResultInvalidCombination(); + } + + return ResultSuccess(); + } + + constexpr ALWAYS_INLINE Result GetMapAliasTestStateAndAttributeMask(u32 &out_state, u32 &out_attr_mask, KMemoryState state) { + switch (state) { + case KMemoryState_Ipc: + out_state = KMemoryState_FlagCanUseIpc; + out_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked; + break; + case KMemoryState_NonSecureIpc: + out_state = KMemoryState_FlagCanUseNonSecureIpc; + out_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + break; + case KMemoryState_NonDeviceIpc: + out_state = KMemoryState_FlagCanUseNonDeviceIpc; + out_attr_mask = KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked; + break; + default: + return svc::ResultInvalidCombination(); + } + + return ResultSuccess(); + } + + ALWAYS_INLINE void CleanupSpecialData(KProcess &dst_process, u32 *dst_msg_ptr, size_t dst_buffer_size) { + /* Parse the message. */ + const ipc::MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size); + const ipc::MessageBuffer::MessageHeader dst_header(dst_msg); + const ipc::MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header); + + /* Check that the size is big enough. */ + if (ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) > dst_buffer_size) { + return; + } + + /* Set the special header. */ + int offset = dst_msg.Set(dst_special_header); + + /* Clear the process id, if needed. */ + if (dst_special_header.GetHasProcessId()) { + offset = dst_msg.SetProcessId(offset, 0); + } + + /* Clear handles, as relevant. */ + auto &dst_handle_table = dst_process.GetHandleTable(); + for (auto i = 0; i < (dst_special_header.GetCopyHandleCount() + dst_special_header.GetMoveHandleCount()); ++i) { + const ams::svc::Handle handle = dst_msg.GetHandle(offset); + + if (handle != ams::svc::InvalidHandle) { + dst_handle_table.Remove(handle); + } + + offset = dst_msg.SetHandle(offset, ams::svc::InvalidHandle); + } + } + + ALWAYS_INLINE Result CleanupServerHandles(uintptr_t message, size_t buffer_size, KPhysicalAddress message_paddr) { + /* Server is assumed to be current thread. */ + const KThread &thread = GetCurrentThread(); + + /* Get the linear message pointer. */ + u32 *msg_ptr; + if (message) { + msg_ptr = GetPointer(KPageTable::GetHeapVirtualAddress(message_paddr)); + } else { + msg_ptr = static_cast(thread.GetThreadLocalRegionHeapAddress())->message_buffer; + buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer); + message = GetInteger(thread.GetThreadLocalRegionAddress()); + } + + /* Parse the message. */ + const ipc::MessageBuffer msg(msg_ptr, buffer_size); + const ipc::MessageBuffer::MessageHeader header(msg); + const ipc::MessageBuffer::SpecialHeader special_header(msg, header); + + /* Check that the size is big enough. 
*/ + R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(header, special_header) <= buffer_size, svc::ResultInvalidCombination()); + + /* If there's a special header, there may be move handles we need to close. */ + if (header.GetHasSpecialHeader()) { + /* Determine the offset to the start of handles. */ + auto offset = msg.GetSpecialDataIndex(header, special_header); + if (special_header.GetHasProcessId()) { + offset += sizeof(u64) / sizeof(u32); + } + if (auto copy_count = special_header.GetCopyHandleCount(); copy_count > 0) { + offset += (sizeof(ams::svc::Handle) * copy_count) / sizeof(u32); + } + + /* Get the handle table. */ + auto &handle_table = thread.GetOwnerProcess()->GetHandleTable(); + + /* Close the handles. */ + for (auto i = 0; i < special_header.GetMoveHandleCount(); ++i) { + handle_table.Remove(msg.GetHandle(offset)); + offset += sizeof(ams::svc::Handle) / sizeof(u32); + } + } + + return ResultSuccess(); + } + + ALWAYS_INLINE Result CleanupServerMap(KSessionRequest *request, KProcess *server_process) { + /* If there's no server process, there's nothing to clean up. */ + R_SUCCEED_IF(server_process == nullptr); + + /* Get the page table. */ + auto &server_page_table = server_process->GetPageTable(); + + /* Cleanup Send mappings. */ + for (size_t i = 0; i < request->GetSendCount(); ++i) { + R_TRY(server_page_table.CleanupForIpcServer(request->GetSendServerAddress(i), request->GetSendSize(i), request->GetSendMemoryState(i), server_process)); + } + + /* Cleanup Receive mappings. */ + for (size_t i = 0; i < request->GetReceiveCount(); ++i) { + R_TRY(server_page_table.CleanupForIpcServer(request->GetReceiveServerAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i), server_process)); + } + + /* Cleanup Exchange mappings. */ + for (size_t i = 0; i < request->GetExchangeCount(); ++i) { + R_TRY(server_page_table.CleanupForIpcServer(request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i), server_process)); + } + + return ResultSuccess(); + } + + ALWAYS_INLINE Result CleanupClientMap(KSessionRequest *request, KProcessPageTable *client_page_table) { + /* If there's no client page table, there's nothing to clean up. */ + R_SUCCEED_IF(client_page_table == nullptr); + + /* Cleanup Send mappings. */ + for (size_t i = 0; i < request->GetSendCount(); ++i) { + R_TRY(client_page_table->CleanupForIpcClient(request->GetSendClientAddress(i), request->GetSendSize(i), request->GetSendMemoryState(i))); + } + + /* Cleanup Receive mappings. */ + for (size_t i = 0; i < request->GetReceiveCount(); ++i) { + R_TRY(client_page_table->CleanupForIpcClient(request->GetReceiveClientAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i))); + } + + /* Cleanup Exchange mappings. */ + for (size_t i = 0; i < request->GetExchangeCount(); ++i) { + R_TRY(client_page_table->CleanupForIpcClient(request->GetExchangeClientAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i))); + } + + return ResultSuccess(); + } + + ALWAYS_INLINE Result CleanupMap(KSessionRequest *request, KProcess *server_process, KProcessPageTable *client_page_table) { + /* Cleanup the server map. */ + R_TRY(CleanupServerMap(request, server_process)); + + /* Cleanup the client map. 
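CleanupServerHandles above locates the move handles by skipping the optional process id and then the copy handles inside the special-data region; each handle occupies one 32-bit word, and the process id occupies two. The index arithmetic in isolation:

    #include <cstddef>
    #include <cstdint>

    /* Word index (into the message buffer) of the first move handle, given the index at which the */
    /* special data begins.                                                                         */
    size_t MoveHandlesIndex(size_t special_data_index, bool has_process_id, int copy_handle_count) {
        size_t offset = special_data_index;
        if (has_process_id) {
            offset += sizeof(uint64_t) / sizeof(uint32_t);     /* two words for the process id */
        }
        offset += static_cast<size_t>(copy_handle_count);      /* one word per copy handle     */
        return offset;
    }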
*/ + R_TRY(CleanupClientMap(request, client_page_table)); + + return ResultSuccess(); + } + + ALWAYS_INLINE Result ProcessReceiveMessageMapAliasDescriptors(int &offset, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, KSessionRequest *request, KMemoryPermission perm, bool send) { + /* Get the offset at the start of processing. */ + const int cur_offset = offset; + + /* Get the map alias descriptor. */ + ipc::MessageBuffer::MapAliasDescriptor src_desc(src_msg, cur_offset); + offset += ipc::MessageBuffer::MapAliasDescriptor::GetDataSize() / sizeof(u32); + + /* Extract address/size. */ + const KProcessAddress src_address = src_desc.GetAddress(); + const size_t size = src_desc.GetSize(); + KProcessAddress dst_address = 0; + + /* Determine the result memory state. */ + KMemoryState dst_state; + R_TRY(GetMapAliasMemoryState(dst_state, src_desc.GetAttribute())); + + /* Process the buffer, if it has a size. */ + if (size > 0) { + /* Set up the source pages for ipc. */ + R_TRY(dst_page_table.SetupForIpc(std::addressof(dst_address), size, src_address, src_page_table, perm, dst_state, send)); + + /* Ensure that we clean up on failure. */ + auto setup_guard = SCOPE_GUARD { + dst_page_table.CleanupForIpcServer(dst_address, size, dst_state, request->GetServerProcess()); + src_page_table.CleanupForIpcClient(src_address, size, dst_state); + }; + + /* Push the appropriate mapping. */ + if (perm == KMemoryPermission_UserRead) { + R_TRY(request->PushSend(src_address, dst_address, size, dst_state)); + } else if (send) { + R_TRY(request->PushExchange(src_address, dst_address, size, dst_state)); + } else { + R_TRY(request->PushReceive(src_address, dst_address, size, dst_state)); + } + + /* We successfully pushed the mapping. */ + setup_guard.Cancel(); + } + + /* Set the output descriptor. */ + dst_msg.Set(cur_offset, ipc::MessageBuffer::MapAliasDescriptor(GetVoidPointer(dst_address), size, src_desc.GetAttribute())); + + return ResultSuccess(); + } + + ALWAYS_INLINE Result ReceiveMessage(bool &recv_list_broken, uintptr_t dst_message_buffer, size_t dst_buffer_size, KPhysicalAddress dst_message_paddr, KThread &src_thread, uintptr_t src_message_buffer, size_t src_buffer_size, KServerSession *session, KSessionRequest *request) { + /* Prepare variables for receive. */ + const KThread &dst_thread = GetCurrentThread(); + KProcess &dst_process = *(dst_thread.GetOwnerProcess()); + KProcess &src_process = *(src_thread.GetOwnerProcess()); + auto &dst_page_table = dst_process.GetPageTable(); + auto &src_page_table = src_process.GetPageTable(); + + /* The receive list is initially not broken. */ + recv_list_broken = false; + + /* Set the server process for the request. */ + request->SetServerProcess(std::addressof(dst_process)); + + /* Determine the message buffers. */ + u32 *dst_msg_ptr, *src_msg_ptr; + bool dst_user, src_user; + + if (dst_message_buffer) { + dst_msg_ptr = GetPointer(KPageTable::GetHeapVirtualAddress(dst_message_paddr)); + dst_user = true; + } else { + dst_msg_ptr = static_cast(dst_thread.GetThreadLocalRegionHeapAddress())->message_buffer; + dst_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer); + dst_message_buffer = GetInteger(dst_thread.GetThreadLocalRegionAddress()); + dst_user = false; + } + + if (src_message_buffer) { + /* NOTE: Nintendo does not check the result of this GetPhysicalAddress call. 
*/ + KPhysicalAddress src_message_paddr; + src_page_table.GetPhysicalAddress(std::addressof(src_message_paddr), src_message_buffer); + + src_msg_ptr = GetPointer(KPageTable::GetHeapVirtualAddress(src_message_paddr)); + src_user = true; + } else { + src_msg_ptr = static_cast(src_thread.GetThreadLocalRegionHeapAddress())->message_buffer; + src_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer); + src_message_buffer = GetInteger(src_thread.GetThreadLocalRegionAddress()); + src_user = false; + } + + /* Parse the headers. */ + const ipc::MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size); + const ipc::MessageBuffer src_msg(src_msg_ptr, src_buffer_size); + const ipc::MessageBuffer::MessageHeader dst_header(dst_msg); + const ipc::MessageBuffer::MessageHeader src_header(src_msg); + const ipc::MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header); + const ipc::MessageBuffer::SpecialHeader src_special_header(src_msg, src_header); + + /* Get the end of the source message. */ + const size_t src_end_offset = ipc::MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount(); + + /* Ensure that the headers fit. */ + R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size, svc::ResultInvalidCombination()); + R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size, svc::ResultInvalidCombination()); + + /* Ensure the receive list offset is after the end of raw data. */ + if (dst_header.GetReceiveListOffset()) { + R_UNLESS(dst_header.GetReceiveListOffset() >= ipc::MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + dst_header.GetRawCount(), svc::ResultInvalidCombination()); + } + + /* Ensure that the destination buffer is big enough to receive the source. */ + R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), svc::ResultMessageTooLarge()); + + /* Get the receive list. */ + const s32 dst_recv_list_idx = ipc::MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header); + ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, dst_special_header, dst_buffer_size, src_end_offset, dst_recv_list_idx, !dst_user); + + /* Ensure that the source special header isn't invalid. */ + const bool src_has_special_header = src_header.GetHasSpecialHeader(); + if (src_has_special_header) { + /* Sending move handles from client -> server is not allowed. */ + R_UNLESS(src_special_header.GetMoveHandleCount() == 0, svc::ResultInvalidCombination()); + } + + /* Prepare for further processing. */ + int pointer_key = 0; + int offset = dst_msg.Set(src_header); + + /* Set up a guard to make sure that we end up in a clean state on error. */ + auto cleanup_guard = SCOPE_GUARD { + /* Cleanup mappings. */ + CleanupMap(request, std::addressof(dst_process), std::addressof(src_page_table)); + + /* Cleanup special data. */ + if (src_header.GetHasSpecialHeader()) { + CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size); + } + + /* Cleanup the header if the receive list isn't broken. */ + if (!recv_list_broken) { + dst_msg.Set(dst_header); + if (dst_header.GetHasSpecialHeader()) { + dst_msg.Set(dst_special_header); + } + } + }; + + /* Process any special data. */ + if (src_header.GetHasSpecialHeader()) { + /* After we process, make sure we track whether the receive list is broken. */ + ON_SCOPE_EXIT { if (offset > dst_recv_list_idx) { recv_list_broken = true; } }; + + /* Process special data. 
*/ + R_TRY(ProcessMessageSpecialData(offset, dst_process, src_process, src_thread, dst_msg, src_msg, src_special_header)); + } + + /* Process any pointer buffers. */ + for (auto i = 0; i < src_header.GetPointerCount(); ++i) { + /* After we process, make sure we track whether the receive list is broken. */ + ON_SCOPE_EXIT { if (offset > dst_recv_list_idx) { recv_list_broken = true; } }; + + R_TRY(ProcessReceiveMessagePointerDescriptors(offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list, dst_user && dst_header.GetReceiveListCount() == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer)); + } + + /* Process any map alias buffers. */ + for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) { + /* After we process, make sure we track whether the receive list is broken. */ + ON_SCOPE_EXIT { if (offset > dst_recv_list_idx) { recv_list_broken = true; } }; + + /* We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite. */ + const KMemoryPermission perm = (i >= src_header.GetSendCount()) ? KMemoryPermission_UserReadWrite : KMemoryPermission_UserRead; + + /* Buffer is send if it is send or exch. */ + const bool send = (i < src_header.GetSendCount()) || (i >= src_header.GetSendCount() + src_header.GetReceiveCount()); + + R_TRY(ProcessReceiveMessageMapAliasDescriptors(offset, dst_page_table, src_page_table, dst_msg, src_msg, request, perm, send)); + } + + /* Process any raw data. */ + if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) { + /* After we process, make sure we track whether the receive list is broken. */ + ON_SCOPE_EXIT { if (offset + raw_count > dst_recv_list_idx) { recv_list_broken = true; } }; + + /* Get the offset and size. */ + const size_t offset_words = offset * sizeof(u32); + const size_t raw_size = raw_count * sizeof(u32); + + /* Fast case is TLS -> TLS, do raw memcpy if we can. */ + if (!dst_user && !src_user) { + std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size); + } else if (dst_user) { + /* Determine how much fast size we can copy. */ + const size_t max_fast_size = std::min(offset_words + raw_size, PageSize); + const size_t fast_size = max_fast_size - offset_words; + + /* Determine the source permission. User buffer should be unmapped + read, TLS should be user readable. */ + const KMemoryPermission src_perm = static_cast(src_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelRead : KMemoryPermission_UserRead); + + /* Perform the fast part of the copy. */ + R_TRY(src_page_table.CopyMemoryFromLinearToKernel(reinterpret_cast(dst_msg_ptr) + offset_words, fast_size, src_message_buffer + offset_words, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + src_perm, + KMemoryAttribute_Uncached, KMemoryAttribute_None)); + + /* If the fast part of the copy didn't get everything, perform the slow part of the copy. 
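In the map-alias loop above, descriptors are processed in the order send, receive, exchange: everything after the send descriptors is mapped read-write, and the client-to-server data direction applies to send and exchange descriptors. Restated as a small helper:

    struct MapAliasClass {
        bool read_write;          /* receive and exchange buffers are mapped UserReadWrite        */
        bool client_to_server;    /* send and exchange buffers carry data from client to server   */
    };

    MapAliasClass ClassifyMapAlias(int index, int send_count, int receive_count) {
        return MapAliasClass{
            index >= send_count,
            (index < send_count) || (index >= send_count + receive_count),
        };
    }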
*/ + if (fast_size < raw_size) { + R_TRY(src_page_table.CopyMemoryFromHeapToHeap(dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + static_cast(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite), + KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked, + src_message_buffer + max_fast_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + src_perm, + KMemoryAttribute_Uncached, KMemoryAttribute_None)); + } + } else /* if (src_user) */ { + /* The source is a user buffer, so it should be unmapped + readable. */ + constexpr KMemoryPermission SourcePermission = static_cast(KMemoryPermission_NotMapped | KMemoryPermission_KernelRead); + + /* Copy the memory. */ + R_TRY(src_page_table.CopyMemoryFromLinearToUser(dst_message_buffer + offset_words, raw_size, src_message_buffer + offset_words, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + SourcePermission, + KMemoryAttribute_Uncached, KMemoryAttribute_None)); + } + } + + /* We succeeded! */ + cleanup_guard.Cancel(); + return ResultSuccess(); + } + + ALWAYS_INLINE Result ProcessSendMessageReceiveMapping(KProcessPageTable &dst_page_table, KProcessAddress client_address, KProcessAddress server_address, size_t size, KMemoryState src_state) { + /* If the size is zero, there's nothing to process. */ + R_SUCCEED_IF(size == 0); + + /* Get the memory state and attribute mask to test. */ + u32 test_state; + u32 test_attr_mask; + R_TRY(GetMapAliasTestStateAndAttributeMask(test_state, test_attr_mask, src_state)); + + /* Determine buffer extents. */ + KProcessAddress aligned_dst_start = util::AlignDown(GetInteger(client_address), PageSize); + KProcessAddress aligned_dst_end = util::AlignUp(GetInteger(client_address) + size, PageSize); + KProcessAddress mapping_dst_start = util::AlignUp(GetInteger(client_address), PageSize); + KProcessAddress mapping_dst_end = util::AlignDown(GetInteger(client_address) + size, PageSize); + + KProcessAddress mapping_src_end = util::AlignDown(GetInteger(server_address) + size, PageSize); + + /* If the start of the buffer is unaligned, handle that. */ + if (aligned_dst_start != mapping_dst_start) { + MESOSPHERE_ASSERT(client_address < mapping_dst_start); + const size_t copy_size = std::min(size, mapping_dst_start - client_address); + R_TRY(dst_page_table.CopyMemoryFromUserToLinear(client_address, copy_size, + test_state, test_state, + KMemoryPermission_UserReadWrite, + test_attr_mask, KMemoryAttribute_None, + server_address)); + } + + /* If the end of the buffer is unaligned, handle that. */ + if (mapping_dst_end < aligned_dst_end && (aligned_dst_start == mapping_dst_start || aligned_dst_start < mapping_dst_end)) { + const size_t copy_size = client_address + size - mapping_dst_end; + R_TRY(dst_page_table.CopyMemoryFromUserToLinear(mapping_dst_end, copy_size, + test_state, test_state, + KMemoryPermission_UserReadWrite, + test_attr_mask, KMemoryAttribute_None, + mapping_src_end)); + } + + return ResultSuccess(); + } + + ALWAYS_INLINE Result ProcessSendMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) { + /* Get the offset at the start of processing. 
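ProcessSendMessageReceiveMapping above only deals with the page-unaligned edges of a receive buffer: the whole pages in the middle were remapped into the server directly, so only the head (before the first fully mapped page) and the tail (after the last fully mapped page) need to be copied back by hand. The extent arithmetic as a standalone sketch:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t PageSize = 0x1000;

    constexpr uintptr_t AlignDown(uintptr_t value, size_t align) { return value & ~static_cast<uintptr_t>(align - 1); }
    constexpr uintptr_t AlignUp(uintptr_t value, size_t align)   { return AlignDown(value + align - 1, align); }

    struct EdgeCopies {
        size_t head_size;   /* bytes before the first whole page of the buffer */
        size_t tail_size;   /* bytes after the last whole page of the buffer   */
    };

    EdgeCopies ComputeEdgeCopies(uintptr_t client_address, size_t size) {
        const uintptr_t aligned_start = AlignDown(client_address, PageSize);
        const uintptr_t aligned_end   = AlignUp(client_address + size, PageSize);
        const uintptr_t mapping_start = AlignUp(client_address, PageSize);
        const uintptr_t mapping_end   = AlignDown(client_address + size, PageSize);

        EdgeCopies out{0, 0};
        if (aligned_start != mapping_start) {
            out.head_size = std::min(size, static_cast<size_t>(mapping_start - client_address));
        }
        if (mapping_end < aligned_end && (aligned_start == mapping_start || aligned_start < mapping_end)) {
            out.tail_size = static_cast<size_t>(client_address + size - mapping_end);
        }
        return out;
    }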
*/ + const int cur_offset = offset; + + /* Get the pointer desc. */ + ipc::MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset); + offset += ipc::MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32); + + /* Extract address/size. */ + const uintptr_t src_pointer = src_desc.GetAddress(); + const size_t recv_size = src_desc.GetSize(); + uintptr_t recv_pointer = 0; + + /* Process the buffer, if it has a size. */ + if (recv_size > 0) { + /* If using indexing, set index. */ + if (dst_recv_list.IsIndex()) { + pointer_key = src_desc.GetIndex(); + } + + /* Get the buffer. */ + dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key); + R_UNLESS(recv_pointer != 0, svc::ResultOutOfResource()); + + /* Perform the pointer data copy. */ + const KMemoryPermission dst_perm = static_cast(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite); + R_TRY(dst_page_table.CopyMemoryFromUserToLinear(recv_pointer, recv_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + dst_perm, + KMemoryAttribute_Uncached, KMemoryAttribute_None, + src_pointer)); + } + + /* Set the output descriptor. */ + dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast(recv_pointer), recv_size, src_desc.GetIndex())); + + return ResultSuccess(); + } + + ALWAYS_INLINE Result SendMessage(uintptr_t src_message_buffer, size_t src_buffer_size, KPhysicalAddress src_message_paddr, KThread &dst_thread, uintptr_t dst_message_buffer, size_t dst_buffer_size, KServerSession *session, KSessionRequest *request) { + /* Prepare variables for send. */ + KThread &src_thread = GetCurrentThread(); + KProcess &dst_process = *(dst_thread.GetOwnerProcess()); + KProcess &src_process = *(src_thread.GetOwnerProcess()); + auto &dst_page_table = dst_process.GetPageTable(); + auto &src_page_table = src_process.GetPageTable(); + + /* Determine the message buffers. */ + u32 *dst_msg_ptr, *src_msg_ptr; + bool dst_user, src_user; + + if (dst_message_buffer) { + /* NOTE: Nintendo does not check the result of this GetPhysicalAddress call. */ + KPhysicalAddress dst_message_paddr; + dst_page_table.GetPhysicalAddress(std::addressof(dst_message_paddr), dst_message_buffer); + + dst_msg_ptr = GetPointer(KPageTable::GetHeapVirtualAddress(dst_message_paddr)); + dst_user = true; + } else { + dst_msg_ptr = static_cast(dst_thread.GetThreadLocalRegionHeapAddress())->message_buffer; + dst_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer); + dst_message_buffer = GetInteger(dst_thread.GetThreadLocalRegionAddress()); + dst_user = false; + } + + if (src_message_buffer) { + src_msg_ptr = GetPointer(KPageTable::GetHeapVirtualAddress(src_message_paddr)); + src_user = true; + } else { + src_msg_ptr = static_cast(src_thread.GetThreadLocalRegionHeapAddress())->message_buffer; + src_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer); + src_message_buffer = GetInteger(src_thread.GetThreadLocalRegionAddress()); + src_user = false; + } + + /* Parse the headers. */ + const ipc::MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size); + const ipc::MessageBuffer src_msg(src_msg_ptr, src_buffer_size); + const ipc::MessageBuffer::MessageHeader dst_header(dst_msg); + const ipc::MessageBuffer::MessageHeader src_header(src_msg); + const ipc::MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header); + const ipc::MessageBuffer::SpecialHeader src_special_header(src_msg, src_header); + + /* Get the end of the source message. 
*/ + const size_t src_end_offset = ipc::MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount(); + + /* Declare variables for processing. */ + int offset = 0; + int pointer_key = 0; + bool processed_special_data = false; + + /* Set up a guard to make sure that we end up in a clean state on error. */ + auto cleanup_guard = SCOPE_GUARD { + /* Cleanup special data. */ + if (processed_special_data) { + if (src_header.GetHasSpecialHeader()) { + CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size); + } + } else { + CleanupServerHandles(src_user ? src_message_buffer : 0, src_buffer_size, src_message_paddr); + } + + /* Cleanup mappings. */ + CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table)); + }; + + /* Ensure that the headers fit. */ + R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size, svc::ResultInvalidCombination()); + R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size, svc::ResultInvalidCombination()); + + /* Ensure the receive list offset is after the end of raw data. */ + if (dst_header.GetReceiveListOffset()) { + R_UNLESS(dst_header.GetReceiveListOffset() >= ipc::MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + dst_header.GetRawCount(), svc::ResultInvalidCombination()); + } + + /* Ensure that the destination buffer is big enough to receive the source. */ + R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), svc::ResultMessageTooLarge()); + + /* Replies must have no buffers. */ + R_UNLESS(src_header.GetSendCount() == 0, svc::ResultInvalidCombination()); + R_UNLESS(src_header.GetReceiveCount() == 0, svc::ResultInvalidCombination()); + R_UNLESS(src_header.GetExchangeCount() == 0, svc::ResultInvalidCombination()); + + /* Get the receive list. */ + const s32 dst_recv_list_idx = ipc::MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header); + ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, dst_special_header, dst_buffer_size, src_end_offset, dst_recv_list_idx, !dst_user); + + /* Handle any receive buffers. */ + for (size_t i = 0; i < request->GetReceiveCount(); ++i) { + R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetReceiveClientAddress(i), request->GetReceiveServerAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i))); + } + + /* Handle any exchange buffers. */ + for (size_t i = 0; i < request->GetExchangeCount(); ++i) { + R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetExchangeClientAddress(i), request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i))); + } + + /* Set the header. */ + offset = dst_msg.Set(src_header); + + /* Process any special data. */ + MESOSPHERE_ASSERT(GetCurrentThreadPointer() == std::addressof(src_thread)); + processed_special_data = true; + if (src_header.GetHasSpecialHeader()) { + R_TRY(ProcessMessageSpecialData(offset, dst_process, src_process, src_thread, dst_msg, src_msg, src_special_header)); + } + + /* Process any pointer buffers. */ + for (auto i = 0; i < src_header.GetPointerCount(); ++i) { + R_TRY(ProcessSendMessagePointerDescriptors(offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list, dst_user && dst_header.GetReceiveListCount() == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer)); + } + + /* Clear any map alias buffers. 
*/ + for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) { + offset = dst_msg.Set(offset, ipc::MessageBuffer::MapAliasDescriptor()); + } + + /* Process any raw data. */ + if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) { + /* Get the offset and size. */ + const size_t offset_words = offset * sizeof(u32); + const size_t raw_size = raw_count * sizeof(u32); + + /* Fast case is TLS -> TLS, do raw memcpy if we can. */ + if (!dst_user && !src_user) { + std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size); + } else if (src_user) { + /* Determine how much fast size we can copy. */ + const size_t max_fast_size = std::min(offset_words + raw_size, PageSize); + const size_t fast_size = max_fast_size - offset_words; + + /* Determine the dst permission. User buffer should be unmapped + read, TLS should be user readable. */ + const KMemoryPermission dst_perm = static_cast(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite); + + /* Perform the fast part of the copy. */ + R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(dst_message_buffer + offset_words, fast_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + dst_perm, + KMemoryAttribute_Uncached, KMemoryAttribute_None, + reinterpret_cast(src_msg_ptr) + offset_words)); + + /* If the fast part of the copy didn't get everything, perform the slow part of the copy. */ + if (fast_size < raw_size) { + R_TRY(src_page_table.CopyMemoryFromHeapToHeap(dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + dst_perm, + KMemoryAttribute_Uncached, KMemoryAttribute_None, + src_message_buffer + max_fast_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + static_cast(KMemoryPermission_NotMapped | KMemoryPermission_KernelRead), + KMemoryAttribute_AnyLocked | KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_AnyLocked | KMemoryAttribute_Locked)); + } + } else /* if (dst_user) */ { + /* The destination is a user buffer, so it should be unmapped + readable. */ + constexpr KMemoryPermission DestinationPermission = static_cast(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite); + + /* Copy the memory. */ + R_TRY(src_page_table.CopyMemoryFromUserToLinear(dst_message_buffer + offset_words, raw_size, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + DestinationPermission, + KMemoryAttribute_Uncached, KMemoryAttribute_None, + src_message_buffer + offset_words)); + } + } + + /* We succeeded. Perform cleanup with validation. */ + cleanup_guard.Cancel(); + + return CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table)); + } + + ALWAYS_INLINE void ReplyAsyncError(KProcess *to_process, uintptr_t to_msg_buf, size_t to_msg_buf_size, Result result) { + /* Convert the buffer to a physical address. */ + KPhysicalAddress phys_addr; + to_process->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), KProcessAddress(to_msg_buf)); + + /* Convert the physical address to a linear pointer. */ + u32 *to_msg = GetPointer(KPageTable::GetHeapVirtualAddress(phys_addr)); + + /* Set the error. 
*/ + ipc::MessageBuffer msg(to_msg, to_msg_buf_size); + msg.SetAsyncResult(result); + } + + } + + void KServerSession::Destroy() { + MESOSPHERE_ASSERT_THIS(); + + this->parent->OnServerClosed(); + + this->CleanupRequests(); + + this->parent->Close(); + } + + Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size, KPhysicalAddress server_message_paddr) { + MESOSPHERE_ASSERT_THIS(); + + /* Lock the session. */ + KScopedLightLock lk(this->lock); + + /* Get the request and client thread. */ + KSessionRequest *request; + KScopedAutoObject client_thread; + { + KScopedSchedulerLock sl; + + /* Ensure that we can service the request. */ + R_UNLESS(!this->parent->IsClientClosed(), svc::ResultSessionClosed()); + + /* Ensure we aren't already servicing a request. */ + R_UNLESS(this->current_request == nullptr, svc::ResultNotFound()); + + /* Ensure we have a request to service. */ + R_UNLESS(!this->request_list.empty(), svc::ResultNotFound()); + + /* Pop the first request from the list. */ + request = std::addressof(this->request_list.front()); + this->request_list.pop_front(); + + /* Get the thread for the request. */ + client_thread = KScopedAutoObject(request->GetThread()); + R_UNLESS(client_thread.IsNotNull(), svc::ResultSessionClosed()); + } + + /* Set the request as our current. */ + this->current_request = request; + + /* Get the client address. */ + uintptr_t client_message = request->GetAddress(); + size_t client_buffer_size = request->GetSize(); + bool recv_list_broken = false; + + /* Receive the message. */ + Result result = ReceiveMessage(recv_list_broken, server_message, server_buffer_size, server_message_paddr, *client_thread.GetPointerUnsafe(), client_message, client_buffer_size, this, request); + + /* Handle cleanup on receive failure. */ + if (R_FAILED(result)) { + /* Cache the result to return it to the client. */ + const Result result_for_client = result; + + /* Clear the current request. */ + { + KScopedSchedulerLock sl; + MESOSPHERE_ASSERT(this->current_request == request); + this->current_request = nullptr; + if (!this->request_list.empty()) { + this->NotifyAvailable(); + } + } + + /* Reply to the client. */ + { + /* After we reply, close our reference to the request. */ + ON_SCOPE_EXIT { request->Close(); }; + + /* Get the event to check whether the request is async. */ + if (KWritableEvent *event = request->GetEvent(); event != nullptr) { + /* The client sent an async request. */ + KProcess *client = client_thread->GetOwnerProcess(); + auto &client_pt = client->GetPageTable(); + + /* Send the async result. */ + if (R_FAILED(result_for_client)) { + ReplyAsyncError(client, client_message, client_buffer_size, result_for_client); + } + + /* Unlock the client buffer. */ + /* NOTE: Nintendo does not check the result of this. */ + client_pt.UnlockForIpcUserBuffer(client_message, client_buffer_size); + + /* Signal the event. */ + event->Signal(); + } else { + /* Set the thread as runnable. */ + KScopedSchedulerLock sl; + if (client_thread->GetState() == KThread::ThreadState_Waiting) { + client_thread->SetSyncedObject(nullptr, result_for_client); + client_thread->SetState(KThread::ThreadState_Runnable); + } + } + } + + /* Set the server result. 
*/ + if (recv_list_broken) { + result = svc::ResultReceiveListBroken(); + } else { + result = svc::ResultNotFound(); + } + } + + return result; + } + + Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buffer_size, KPhysicalAddress server_message_paddr) { + MESOSPHERE_ASSERT_THIS(); + + /* Lock the session. */ + KScopedLightLock lk(this->lock); + + /* Get the request. */ + KSessionRequest *request; + { + KScopedSchedulerLock sl; + + /* Get the current request. */ + request = this->current_request; + R_UNLESS(request != nullptr, svc::ResultInvalidState()); + + /* Clear the current request, since we're processing it. */ + this->current_request = nullptr; + if (!this->request_list.empty()) { + this->NotifyAvailable(); + } + } + + /* Close reference to the request once we're done processing it. */ + ON_SCOPE_EXIT { request->Close(); }; + + /* Extract relevant information from the request. */ + const uintptr_t client_message = request->GetAddress(); + const size_t client_buffer_size = request->GetSize(); + KThread *client_thread = request->GetThread(); + KWritableEvent *event = request->GetEvent(); + + /* Check whether we're closed. */ + const bool closed = (client_thread == nullptr || this->parent->IsClientClosed()); + + Result result; + if (!closed) { + /* If we're not closed, send the reply. */ + result = SendMessage(server_message, server_buffer_size, server_message_paddr, *client_thread, client_message, client_buffer_size, this, request); + } else { + /* Otherwise, we'll need to do some cleanup. */ + KProcess *server_process = request->GetServerProcess(); + KProcess *client_process = (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr; + KProcessPageTable *client_page_table = (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr; + + /* Cleanup server handles. */ + result = CleanupServerHandles(server_message, server_buffer_size, server_message_paddr); + + /* Cleanup mappings. */ + Result cleanup_map_result = CleanupMap(request, server_process, client_page_table); + + /* If we successfully cleaned up handles, use the map cleanup result as our result. */ + if (R_SUCCEEDED(result)) { + result = cleanup_map_result; + } + } + + /* Select a result for the client. */ + Result client_result = result; + if (closed && R_SUCCEEDED(result)) { + result = svc::ResultSessionClosed(); + client_result = svc::ResultSessionClosed(); + } else { + result = ResultSuccess(); + } + + /* If there's a client thread, update it. */ + if (client_thread != nullptr) { + if (event != nullptr) { + /* Get the client process/page table. */ + KProcess *client_process = client_thread->GetOwnerProcess(); + KProcessPageTable *client_page_table = std::addressof(client_process->GetPageTable()); + + /* If we need to, reply with an async error. */ + if (R_FAILED(client_result)) { + ReplyAsyncError(client_process, client_message, client_buffer_size, client_result); + } + + /* Unlock the client buffer. */ + /* NOTE: Nintendo does not check the result of this. */ + client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); + + /* Signal the event. */ + event->Signal(); + } else { + /* Set the thread as runnable. 
*/ + KScopedSchedulerLock sl; + if (client_thread->GetState() == KThread::ThreadState_Waiting) { + client_thread->SetSyncedObject(nullptr, client_result); + client_thread->SetState(KThread::ThreadState_Runnable); + } + } + } + + return result; + } + + Result KServerSession::OnRequest(KSessionRequest *request) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Ensure that we can handle new requests. */ + R_UNLESS(!this->parent->IsServerClosed(), svc::ResultSessionClosed()); + + /* If there's no event, this is synchronous, so we should check for thread termination. */ + if (request->GetEvent() == nullptr) { + KThread *thread = request->GetThread(); + R_UNLESS(!thread->IsTerminationRequested(), svc::ResultTerminationRequested()); + thread->SetState(KThread::ThreadState_Waiting); + } + + /* Get whether we're empty. */ + const bool was_empty = this->request_list.empty(); + + /* Add the request to the list. */ + request->Open(); + this->request_list.push_back(*request); + + /* If we were empty, signal. */ + if (was_empty) { + this->NotifyAvailable(); + } + + return ResultSuccess(); + } + + bool KServerSession::IsSignaledImpl() const { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* If the client is closed, we're always signaled. */ + if (this->parent->IsClientClosed()) { + return true; + } + + /* Otherwise, we're signaled if we have a request and aren't handling one. */ + return !this->request_list.empty() && this->current_request == nullptr; + } + + bool KServerSession::IsSignaled() const { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + return this->IsSignaledImpl(); + } + + void KServerSession::CleanupRequests() { + MESOSPHERE_ASSERT_THIS(); + + KScopedLightLock lk(this->lock); + + /* Clean up any pending requests. */ + while (true) { + /* Get the next request. */ + KSessionRequest *request = nullptr; + { + KScopedSchedulerLock sl; + + if (this->current_request) { + /* Choose the current request if we have one. */ + request = this->current_request; + this->current_request = nullptr; + } else if (!this->request_list.empty()) { + /* Pop the request from the front of the list. */ + request = std::addressof(this->request_list.front()); + this->request_list.pop_front(); + } + } + + /* If there's no request, we're done. */ + if (request == nullptr) { + break; + } + + /* Close a reference to the request once it's cleaned up. */ + ON_SCOPE_EXIT { request->Close(); }; + + /* Extract relevant information from the request. */ + const uintptr_t client_message = request->GetAddress(); + const size_t client_buffer_size = request->GetSize(); + KThread *client_thread = request->GetThread(); + KWritableEvent *event = request->GetEvent(); + + KProcess *server_process = request->GetServerProcess(); + KProcess *client_process = (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr; + KProcessPageTable *client_page_table = (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr; + + /* Cleanup the mappings. */ + Result result = CleanupMap(request, server_process, client_page_table); + + /* If there's a client thread, update it. */ + if (client_thread != nullptr) { + if (event != nullptr) { + /* We need to reply async. */ + ReplyAsyncError(client_process, client_message, client_buffer_size, (R_SUCCEEDED(result) ? svc::ResultSessionClosed() : result)); + + /* Unlock the client buffer. 
*/ + /* NOTE: Nintendo does not check the result of this. */ + client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size); + + /* Signal the event. */ + event->Signal(); + } else { + /* Set the thread as runnable. */ + KScopedSchedulerLock sl; + if (client_thread->GetState() == KThread::ThreadState_Waiting) { + client_thread->SetSyncedObject(nullptr, (R_SUCCEEDED(result) ? svc::ResultSessionClosed() : result)); + client_thread->SetState(KThread::ThreadState_Runnable); + } + } + } + } + } + + void KServerSession::OnClientClosed() { + MESOSPHERE_ASSERT_THIS(); + + KScopedLightLock lk(this->lock); + + /* Handle any pending requests. */ + KSessionRequest *prev_request = nullptr; + while (true) { + /* Declare variables for processing the request. */ + KSessionRequest *request = nullptr; + KWritableEvent *event = nullptr; + KThread *thread = nullptr; + bool cur_request = false; + bool terminate = false; + + /* Get the next request. */ + { + KScopedSchedulerLock sl; + + if (this->current_request != nullptr && this->current_request != prev_request) { + /* Set the request, open a reference as we process it. */ + request = this->current_request; + request->Open(); + cur_request = true; + + /* Get thread and event for the request. */ + thread = request->GetThread(); + event = request->GetEvent(); + + /* If the thread is terminating, handle that. */ + if (thread->IsTerminationRequested()) { + request->ClearThread(); + request->ClearEvent(); + terminate = true; + } + prev_request = request; + } else if (!this->request_list.empty()) { + /* Pop the request from the front of the list. */ + request = std::addressof(this->request_list.front()); + this->request_list.pop_front(); + + /* Get thread and event for the request. */ + thread = request->GetThread(); + event = request->GetEvent(); + } + } + + /* If there are no requests, we're done. */ + if (request == nullptr) { + break; + } + + /* All requests must have threads. */ + MESOSPHERE_ASSERT(thread != nullptr); + + /* Ensure that we close the request when done. */ + ON_SCOPE_EXIT { request->Close(); }; + + /* If we're terminating, close a reference to the thread and event. */ + if (terminate) { + thread->Close(); + if (event != nullptr) { + event->Close(); + } + } + + /* If we need to, reply. */ + if (event != nullptr && !cur_request) { + /* There must be no mappings. */ + MESOSPHERE_ASSERT(request->GetSendCount() == 0); + MESOSPHERE_ASSERT(request->GetReceiveCount() == 0); + MESOSPHERE_ASSERT(request->GetExchangeCount() == 0); + + /* Get the process and page table. */ + KProcess *client_process = thread->GetOwnerProcess(); + auto &client_pt = client_process->GetPageTable(); + + /* Reply to the request. */ + ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(), svc::ResultSessionClosed()); + + /* Unlock the buffer. */ + /* NOTE: Nintendo does not check the result of this. */ + client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize()); + + /* Signal the event. */ + event->Signal(); + } + } + + /* Notify. 
*/ + this->NotifyAbort(svc::ResultSessionClosed()); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_session.cpp b/libraries/libmesosphere/source/kern_k_session.cpp new file mode 100644 index 000000000..2194842d2 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_session.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <mesosphere.hpp> + +namespace ams::kern { + + void KSession::Initialize(KClientPort *client_port, uintptr_t name) { + MESOSPHERE_ASSERT_THIS(); + + /* Increment reference count. */ + /* Because reference count is one on creation, this will result */ + /* in a reference count of two. Thus, when both server and client are closed */ + /* this object will be destroyed. */ + this->Open(); + + /* Create our sub sessions. */ + KAutoObject::Create(std::addressof(this->server)); + KAutoObject::Create(std::addressof(this->client)); + + /* Initialize our sub sessions. */ + this->server.Initialize(this); + this->client.Initialize(this); + + /* Set state and name. */ + this->state = State::Normal; + this->name = name; + + /* Set our owner process. */ + this->process = GetCurrentProcessPointer(); + this->process->Open(); + + /* Set our port. */ + this->port = client_port; + if (this->port != nullptr) { + this->port->Open(); + } + + /* Mark initialized. */ + this->initialized = true; + } + + void KSession::Finalize() { + if (this->port != nullptr) { + this->port->OnSessionFinalized(); + this->port->Close(); + } + } + + void KSession::OnServerClosed() { + MESOSPHERE_ASSERT_THIS(); + + if (this->state == State::Normal) { + this->state = State::ServerClosed; + this->client.OnServerClosed(); + } + } + + void KSession::OnClientClosed() { + MESOSPHERE_ASSERT_THIS(); + + if (this->state == State::Normal) { + this->state = State::ClientClosed; + this->server.OnClientClosed(); + } + } + + void KSession::PostDestroy(uintptr_t arg) { + /* Release the session count resource the owner process holds. */ + KProcess *owner = reinterpret_cast<KProcess *>(arg); + owner->ReleaseResource(ams::svc::LimitableResource_SessionCountMax, 1); + owner->Close(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_session_request.cpp b/libraries/libmesosphere/source/kern_k_session_request.cpp new file mode 100644 index 000000000..8b20c52f5 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_session_request.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <mesosphere.hpp> + +namespace ams::kern { + + Result KSessionRequest::SessionMappings::PushMap(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state, size_t index) { + /* At most 15 buffers of each type (4-bit descriptor counts). */ + MESOSPHERE_ASSERT(index < ((1ul << 4) - 1) * 3); + + /* Get the mapping. */ + Mapping *mapping; + if (index < NumStaticMappings) { + mapping = std::addressof(this->static_mappings[index]); + } else { + /* Allocate a page for the extra mappings. */ + if (this->mappings == nullptr) { + KPageBuffer *page_buffer = KPageBuffer::Allocate(); + R_UNLESS(page_buffer != nullptr, svc::ResultOutOfMemory()); + + this->mappings = reinterpret_cast<Mapping *>(page_buffer); + } + + mapping = std::addressof(this->mappings[index - NumStaticMappings]); + } + + /* Set the mapping. */ + mapping->Set(client, server, size, state); + + return ResultSuccess(); + } + + Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) { + MESOSPHERE_ASSERT(this->num_recv == 0); + MESOSPHERE_ASSERT(this->num_exch == 0); + return this->PushMap(client, server, size, state, this->num_send++); + } + + Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) { + MESOSPHERE_ASSERT(this->num_exch == 0); + return this->PushMap(client, server, size, state, this->num_send + this->num_recv++); + } + + Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) { + return this->PushMap(client, server, size, state, this->num_send + this->num_recv + this->num_exch++); + } + + void KSessionRequest::SessionMappings::Finalize() { + if (this->mappings) { + KPageBuffer::Free(reinterpret_cast<KPageBuffer *>(this->mappings)); + this->mappings = nullptr; + } + } + +} diff --git a/libraries/libmesosphere/source/kern_k_shared_memory.cpp b/libraries/libmesosphere/source/kern_k_shared_memory.cpp new file mode 100644 index 000000000..641e8c4f6 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_shared_memory.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <mesosphere.hpp> + +namespace ams::kern { + + Result KSharedMemory::Initialize(KProcess *owner, size_t size, ams::svc::MemoryPermission own_perm, ams::svc::MemoryPermission rem_perm) { + MESOSPHERE_ASSERT_THIS(); + + /* Set members. */ + this->owner_process_id = owner->GetId(); + this->owner_perm = own_perm; + this->remote_perm = rem_perm; + + /* Get the number of pages. */ + const size_t num_pages = util::DivideUp(size, PageSize); + MESOSPHERE_ASSERT(num_pages > 0); + + /* Get the resource limit. */ + KResourceLimit *reslimit = owner->GetResourceLimit(); + + /* Reserve memory for ourselves.
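The scoped reservation below charges the owner's PhysicalMemoryMax limit up front and, if this function bails out before Commit(), is expected to give that charge back when it goes out of scope, which is what makes the R_UNLESS/R_TRY early returns safe. A condensed, illustrative sketch of the idiom (AllocateBackingPages() is a placeholder for whatever fallible work the reservation protects, not a real kernel function):
    KScopedResourceReservation reservation(reslimit, ams::svc::LimitableResource_PhysicalMemoryMax, size);
    R_UNLESS(reservation.Succeeded(), svc::ResultLimitReached());
    R_TRY(AllocateBackingPages());
    reservation.Commit();
Only once Commit() runs does the reservation become permanent.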
*/ + KScopedResourceReservation memory_reservation(reslimit, ams::svc::LimitableResource_PhysicalMemoryMax, size); + R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Allocate the memory. */ + R_TRY(Kernel::GetMemoryManager().Allocate(std::addressof(this->page_group), num_pages, owner->GetAllocateOption())); + + /* Commit our reservation. */ + memory_reservation.Commit(); + + /* Set our resource limit. */ + this->resource_limit = reslimit; + this->resource_limit->Open(); + + /* Open the memory. */ + this->page_group.Open(); + + /* Mark initialized. */ + this->is_initialized = true; + + /* Clear all pages in the memory. */ + for (const auto &block : this->page_group) { + std::memset(GetVoidPointer(block.GetAddress()), 0, block.GetSize()); + } + + return ResultSuccess(); + } + + void KSharedMemory::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + /* Get the number of pages. */ + const size_t num_pages = this->page_group.GetNumPages(); + const size_t size = num_pages * PageSize; + + /* Close and finalize the page group. */ + this->page_group.Close(); + this->page_group.Finalize(); + + /* Release the memory reservation. */ + this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, size); + this->resource_limit->Close(); + + /* Perform inherited finalization. */ + KAutoObjectWithSlabHeapAndContainer::Finalize(); + } + + Result KSharedMemory::Map(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process, ams::svc::MemoryPermission map_perm) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(this->page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Validate the permission. */ + const ams::svc::MemoryPermission test_perm = (process->GetId() == this->owner_process_id) ? this->owner_perm : this->remote_perm; + if (test_perm == ams::svc::MemoryPermission_DontCare) { + MESOSPHERE_ASSERT(map_perm == ams::svc::MemoryPermission_Read || map_perm == ams::svc::MemoryPermission_ReadWrite); + } else { + R_UNLESS(map_perm == test_perm, svc::ResultInvalidNewMemoryPermission()); + } + + /* Map the memory. */ + return table->MapPageGroup(address, this->page_group, KMemoryState_Shared, ConvertToKMemoryPermission(map_perm)); + } + + Result KSharedMemory::Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(this->page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Unmap the memory. */ + return table->UnmapPageGroup(address, this->page_group, KMemoryState_Shared); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_synchronization.cpp b/libraries/libmesosphere/source/kern_k_synchronization.cpp index 46817e035..8f611269d 100644 --- a/libraries/libmesosphere/source/kern_k_synchronization.cpp +++ b/libraries/libmesosphere/source/kern_k_synchronization.cpp @@ -20,7 +20,87 @@ namespace ams::kern { Result KSynchronization::Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout) { MESOSPHERE_ASSERT_THIS(); - MESOSPHERE_UNIMPLEMENTED(); + /* Allocate space on stack for thread iterators. */ + KSynchronizationObject::iterator *thread_iters = static_cast(__builtin_alloca(sizeof(KSynchronizationObject::iterator) * num_objects)); + + /* Prepare for wait. */ + KThread *thread = GetCurrentThreadPointer(); + s32 sync_index = -1; + KHardwareTimer *timer; + + { + /* Setup the scheduling lock and sleep. 
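KScopedSchedulerLockAndSleep appears to couple two things: it holds the scheduler lock for the scope, and unless CancelSleep() is called it arranges for the thread to sleep with the given timeout when the scope ends, handing the armed KHardwareTimer back through its first argument so the caller can cancel it after waking. The early-out paths below therefore call slp.CancelSleep() before returning, and KThread::Sleep later in this change uses the same idiom. A condensed sketch (no_need_to_wait and cur_thread are generic stand-ins, not names from the code below):
    KHardwareTimer *timer;
    {
        KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
        if (no_need_to_wait) { slp.CancelSleep(); return ResultSuccess(); }
        cur_thread->SetState(KThread::ThreadState_Waiting);
    }
    if (timer != nullptr) { timer->CancelTask(cur_thread); }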
*/ + KScopedSchedulerLockAndSleep slp(std::addressof(timer), thread, timeout); + + /* Check if any of the objects are already signaled. */ + for (auto i = 0; i < num_objects; ++i) { + AMS_ASSERT(objects[i] != nullptr); + + if (objects[i]->IsSignaled()) { + *out_index = i; + slp.CancelSleep(); + return ResultSuccess(); + } + } + + /* Check if the timeout is zero. */ + if (timeout == 0) { + slp.CancelSleep(); + return svc::ResultTimedOut(); + } + + /* Check if the thread should terminate. */ + if (thread->IsTerminationRequested()) { + slp.CancelSleep(); + return svc::ResultTerminationRequested(); + } + + /* Check if waiting was canceled. */ + if (thread->IsWaitCancelled()) { + slp.CancelSleep(); + thread->ClearWaitCancelled(); + return svc::ResultCancelled(); + } + + /* Add the waiters. */ + for (auto i = 0; i < num_objects; ++i) { + thread_iters[i] = objects[i]->RegisterWaitingThread(thread); + } + + /* Mark the thread as waiting. */ + thread->SetCancellable(); + thread->SetSyncedObject(nullptr, svc::ResultTimedOut()); + thread->SetState(KThread::ThreadState_Waiting); + } + + /* The lock/sleep is done, so we should be able to get our result. */ + + /* Thread is no longer cancellable. */ + thread->ClearCancellable(); + + /* Cancel the timer as needed. */ + if (timer != nullptr) { + timer->CancelTask(thread); + } + + /* Get the wait result. */ + Result wait_result; + { + KScopedSchedulerLock lk; + KSynchronizationObject *synced_obj; + wait_result = thread->GetWaitResult(std::addressof(synced_obj)); + + for (auto i = 0; i < num_objects; ++i) { + objects[i]->UnregisterWaitingThread(thread_iters[i]); + if (objects[i] == synced_obj) { + sync_index = i; + } + } + } + + /* Set output. */ + *out_index = sync_index; + return wait_result; } void KSynchronization::OnAvailable(KSynchronizationObject *object) { diff --git a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp index 9b093f1a8..ce94f95eb 100644 --- a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp +++ b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp @@ -32,6 +32,19 @@ namespace ams::kern { void KSynchronizationObject::Finalize() { MESOSPHERE_ASSERT_THIS(); + /* If auditing, ensure that the object has no waiters. */ + #if defined(MESOSPHERE_BUILD_FOR_AUDITING) + { + KScopedSchedulerLock sl; + + auto end = this->end(); + for (auto it = this->begin(); it != end; ++it) { + KThread *thread = std::addressof(*it); + MESOSPHERE_LOG("KSynchronizationObject::Finalize(%p) with %p (id=%ld) waiting.\n", this, thread, thread->GetId()); + } + } + #endif + this->OnFinalizeSynchronizationObject(); KAutoObject::Finalize(); } @@ -39,16 +52,42 @@ namespace ams::kern { void KSynchronizationObject::DebugWaiters() { MESOSPHERE_ASSERT_THIS(); - MESOSPHERE_TODO("Do useful debug operation here."); + /* If debugging, dump the list of waiters. 
*/ + #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING) + { + KScopedSchedulerLock sl; + + MESOSPHERE_RELEASE_LOG("Threads waiting on %p:\n", this); + + bool has_waiters = false; + auto end = this->end(); + for (auto it = this->begin(); it != end; ++it) { + KThread *thread = std::addressof(*it); + + if (KProcess *process = thread->GetOwnerProcess(); process != nullptr) { + MESOSPHERE_RELEASE_LOG(" %p tid=%ld pid=%ld (%s)\n", thread, thread->GetId(), process->GetId(), process->GetName()); + } else { + MESOSPHERE_RELEASE_LOG(" %p tid=%ld (Kernel)\n", thread, thread->GetId()); + } + + has_waiters = true; + } + + /* If we didn't have any waiters, print so. */ + if (!has_waiters) { + MESOSPHERE_RELEASE_LOG(" None\n"); + } + } + #endif } - KSynchronizationObject::iterator KSynchronizationObject::AddWaiterThread(KThread *thread) { + KSynchronizationObject::iterator KSynchronizationObject::RegisterWaitingThread(KThread *thread) { MESOSPHERE_ASSERT_THIS(); return this->thread_list.insert(this->thread_list.end(), *thread); } - KSynchronizationObject::iterator KSynchronizationObject::RemoveWaiterThread(KSynchronizationObject::iterator it) { + KSynchronizationObject::iterator KSynchronizationObject::UnregisterWaitingThread(KSynchronizationObject::iterator it) { MESOSPHERE_ASSERT_THIS(); return this->thread_list.erase(it); diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp index 2fb52b283..ae84126fa 100644 --- a/libraries/libmesosphere/source/kern_k_thread.cpp +++ b/libraries/libmesosphere/source/kern_k_thread.cpp @@ -94,7 +94,7 @@ namespace ams::kern { /* Set parent and condvar tree. */ this->parent = nullptr; - this->cond_var = nullptr; + this->condvar_tree = nullptr; /* Set sync booleans. */ this->signaled = false; @@ -143,7 +143,7 @@ namespace ams::kern { this->num_kernel_waiters = 0; this->entrypoint = reinterpret_cast(func); - /* We don't need a release (probably), and we've spent no time on the cpu. */ + /* We haven't released our resource limit hint, and we've spent no time on the cpu. */ this->resource_limit_release_hint = 0; this->cpu_time = 0; @@ -251,7 +251,49 @@ namespace ams::kern { } void KThread::Finalize() { - MESOSPHERE_UNIMPLEMENTED(); + MESOSPHERE_ASSERT_THIS(); + + /* If the thread has an owner process, unregister it. */ + if (this->parent != nullptr) { + this->parent->UnregisterThread(this); + } + + /* If the thread has a local region, delete it. */ + if (this->tls_address != Null) { + MESOSPHERE_R_ABORT_UNLESS(this->parent->DeleteThreadLocalRegion(this->tls_address)); + } + + /* Release any waiters. */ + { + MESOSPHERE_ASSERT(this->lock_owner == nullptr); + KScopedSchedulerLock sl; + + auto it = this->waiter_list.begin(); + while (it != this->waiter_list.end()) { + /* The thread shouldn't be a kernel waiter. */ + MESOSPHERE_ASSERT(!IsKernelAddressKey(it->GetAddressKey())); + it->SetLockOwner(nullptr); + it->SetSyncedObject(nullptr, svc::ResultInvalidState()); + it->Wakeup(); + it = this->waiter_list.erase(it); + } + } + + /* Finalize the thread context. */ + this->thread_context.Finalize(); + + /* Cleanup the kernel stack. */ + if (this->kernel_stack_top != nullptr) { + CleanupKernelStack(reinterpret_cast(this->kernel_stack_top)); + } + + /* Decrement the parent process's thread count. */ + if (this->parent != nullptr) { + this->parent->DecrementThreadCount(); + } + + /* Perform inherited finalization. 
*/ + KAutoObjectWithSlabHeapAndContainer::Finalize(); } bool KThread::IsSignaled() const { @@ -278,8 +320,167 @@ namespace ams::kern { this->Wakeup(); } + void KThread::StartTermination() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Release user exception and unpin, if relevant. */ + if (this->parent != nullptr) { + this->parent->ReleaseUserException(this); + if (this->parent->GetPinnedThread(GetCurrentCoreId()) == this) { + KScheduler::UnpinCurrentThread(this->parent); + } + } + + /* Set state to terminated. */ + this->SetState(KThread::ThreadState_Terminated); + + /* Clear the thread's status as running in parent. */ + if (this->parent != nullptr) { + this->parent->ClearRunningThread(this); + } + + /* Signal. */ + this->signaled = true; + this->NotifyAvailable(); + + /* Call the on thread termination handler. */ + KThreadContext::OnThreadTerminating(this); + + /* Clear previous thread in KScheduler. */ + KScheduler::ClearPreviousThread(this); + + /* Register terminated dpc flag. */ + this->RegisterDpc(DpcFlag_Terminated); + } + + void KThread::FinishTermination() { + MESOSPHERE_ASSERT_THIS(); + + /* Ensure that the thread is not executing on any core. */ + if (this->parent != nullptr) { + for (size_t i = 0; i < cpu::NumCores; ++i) { + KThread *core_thread; + do { + core_thread = Kernel::GetCurrentContext(i).current_thread.load(std::memory_order_acquire); + } while (core_thread == this); + } + } + + /* Close the thread. */ + this->Close(); + } + void KThread::DoWorkerTask() { - MESOSPHERE_UNIMPLEMENTED(); + /* Finish the termination that was begun by Exit(). */ + this->FinishTermination(); + } + + void KThread::Pin() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Set ourselves as pinned. */ + this->GetStackParameters().is_pinned = true; + + /* Disable core migration. */ + MESOSPHERE_ASSERT(this->num_core_migration_disables == 0); + { + ++this->num_core_migration_disables; + + /* Save our ideal state to restore when we're unpinned. */ + this->original_ideal_core_id = this->ideal_core_id; + this->original_affinity_mask = this->affinity_mask; + + /* Bind ourselves to this core. */ + const s32 active_core = this->GetActiveCore(); + const s32 current_core = GetCurrentCoreId(); + + this->SetActiveCore(current_core); + this->ideal_core_id = current_core; + + this->affinity_mask.SetAffinityMask(1ul << current_core); + + if (active_core != current_core || this->affinity_mask.GetAffinityMask() != this->original_affinity_mask.GetAffinityMask()) { + KScheduler::OnThreadAffinityMaskChanged(this, this->original_affinity_mask, active_core); + } + } + + /* Disallow performing thread suspension. */ + { + /* Update our allow flags. */ + this->suspend_allowed_flags &= ~(1 << (SuspendType_Thread + ThreadState_SuspendShift)); + + /* Update our state. */ + const ThreadState old_state = this->thread_state; + this->thread_state = static_cast(this->GetSuspendFlags() | (old_state & ThreadState_Mask)); + if (this->thread_state != old_state) { + KScheduler::OnThreadStateChanged(this, old_state); + } + } + + /* Update our SVC access permissions. */ + MESOSPHERE_ASSERT(this->parent != nullptr); + this->parent->CopyPinnedSvcPermissionsTo(this->GetStackParameters()); + } + + void KThread::Unpin() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Set ourselves as unpinned. 
*/ + this->GetStackParameters().is_pinned = false; + + /* Enable core migration. */ + MESOSPHERE_ASSERT(this->num_core_migration_disables == 1); + { + --this->num_core_migration_disables; + + /* Restore our original state. */ + const KAffinityMask old_mask = this->affinity_mask; + + this->ideal_core_id = this->original_ideal_core_id; + this->affinity_mask = this->original_affinity_mask; + + if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { + const s32 active_core = this->GetActiveCore(); + + if (!this->affinity_mask.GetAffinity(active_core)) { + if (this->ideal_core_id >= 0) { + this->SetActiveCore(this->ideal_core_id); + } else { + this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask())); + } + } + KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core); + } + } + + /* Allow performing thread suspension (if termination hasn't been requested). */ + { + /* Update our allow flags. */ + if (!this->IsTerminationRequested()) { + this->suspend_allowed_flags |= (1 << (SuspendType_Thread + ThreadState_SuspendShift)); + } + + /* Update our state. */ + const ThreadState old_state = this->thread_state; + this->thread_state = static_cast(this->GetSuspendFlags() | (old_state & ThreadState_Mask)); + if (this->thread_state != old_state) { + KScheduler::OnThreadStateChanged(this, old_state); + } + } + + /* Update our SVC access permissions. */ + MESOSPHERE_ASSERT(this->parent != nullptr); + this->parent->CopyUnpinnedSvcPermissionsTo(this->GetStackParameters()); + + /* Resume any threads that began waiting on us while we were pinned. */ + for (auto it = this->pinned_waiter_list.begin(); it != this->pinned_waiter_list.end(); ++it) { + if (it->GetState() == ThreadState_Waiting) { + it->SetState(ThreadState_Runnable); + } + } } void KThread::DisableCoreMigration() { @@ -293,7 +494,7 @@ namespace ams::kern { this->original_ideal_core_id = this->ideal_core_id; this->original_affinity_mask = this->affinity_mask; - /* Bind outselves to this core. */ + /* Bind ourselves to this core. */ const s32 active_core = this->GetActiveCore(); this->ideal_core_id = active_core; this->affinity_mask.SetAffinityMask(1ul << active_core); @@ -315,7 +516,7 @@ namespace ams::kern { /* Restore our ideals. */ this->ideal_core_id = this->original_ideal_core_id; - this->original_affinity_mask = this->affinity_mask; + this->affinity_mask = this->original_affinity_mask; if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { const s32 active_core = this->GetActiveCore(); @@ -332,6 +533,144 @@ namespace ams::kern { } } + Result KThread::GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) { + MESOSPHERE_ASSERT_THIS(); + { + KScopedSchedulerLock sl; + MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0); + + /* Select between core mask and original core mask. */ + if (this->num_core_migration_disables == 0) { + *out_ideal_core = this->ideal_core_id; + *out_affinity_mask = this->affinity_mask.GetAffinityMask(); + } else { + *out_ideal_core = this->original_ideal_core_id; + *out_affinity_mask = this->original_affinity_mask.GetAffinityMask(); + } + } + + return ResultSuccess(); + } + + Result KThread::SetCoreMask(int32_t ideal_core, u64 affinity_mask) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->parent != nullptr); + MESOSPHERE_ASSERT(affinity_mask != 0); + KScopedLightLock lk(this->activity_pause_lock); + + /* Set the core mask. 
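The affinity mask is a bitmask of the cores the thread may run on (bit n set means core n is allowed), and the ideal core has to be one of them; for example, affinity_mask = 0b1011 permits cores 0, 1 and 3, so requesting ideal_core = 2 would fail the combination check below.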
*/ + { + KScopedSchedulerLock sl; + MESOSPHERE_ASSERT(this->num_core_migration_disables >= 0); + + /* If the core id is no-update magic, preserve the ideal core id. */ + if (ideal_core == ams::svc::IdealCoreNoUpdate) { + if (this->num_core_migration_disables == 0) { + ideal_core = this->ideal_core_id; + } else { + ideal_core = this->original_ideal_core_id; + } + + R_UNLESS(((1ul << ideal_core) & affinity_mask) != 0, svc::ResultInvalidCombination()); + } + + /* If we haven't disabled migration, perform an affinity change. */ + if (this->num_core_migration_disables == 0) { + const KAffinityMask old_mask = this->affinity_mask; + + /* Set our new ideals. */ + this->ideal_core_id = ideal_core; + this->affinity_mask.SetAffinityMask(affinity_mask); + + if (this->affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) { + const s32 active_core = this->GetActiveCore(); + + if (active_core >= 0) { + if (!this->affinity_mask.GetAffinity(active_core)) { + this->SetActiveCore(this->ideal_core_id); + } else { + this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(this->affinity_mask.GetAffinityMask())); + } + } + KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core); + } + } else { + /* Otherwise, we edit the original affinity for restoration later. */ + this->original_ideal_core_id = ideal_core; + this->original_affinity_mask.SetAffinityMask(affinity_mask); + } + } + + /* Update the pinned waiter list. */ + { + bool retry_update; + bool thread_is_pinned = false; + do { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Don't do any further management if our termination has been requested. */ + R_SUCCEED_IF(this->IsTerminationRequested()); + + /* By default, we won't need to retry. */ + retry_update = false; + + /* Check if the thread is currently running. */ + bool thread_is_current = false; + s32 thread_core; + for (thread_core = 0; thread_core < static_cast(cpu::NumCores); ++thread_core) { + if (Kernel::GetCurrentContext(thread_core).current_thread == this) { + thread_is_current = true; + break; + } + } + + /* If the thread is currently running, check whether it's no longer allowed under the new mask. */ + if (thread_is_current && ((1ul << thread_core) & affinity_mask) == 0) { + /* If the thread is pinned, we want to wait until it's not pinned. */ + if (this->GetStackParameters().is_pinned) { + /* Verify that the current thread isn't terminating. */ + R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested()); + + /* Note that the thread was pinned. */ + thread_is_pinned = true; + + /* Wait until the thread isn't pinned any more. */ + this->pinned_waiter_list.push_back(GetCurrentThread()); + GetCurrentThread().SetState(ThreadState_Waiting); + } else { + /* If the thread isn't pinned, release the scheduler lock and retry until it's not current. */ + retry_update = true; + } + } + } while (retry_update); + + /* If the thread was pinned, it no longer is, and we should remove the current thread from our waiter list. */ + if (thread_is_pinned) { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Remove from the list. */ + this->pinned_waiter_list.erase(this->pinned_waiter_list.iterator_to(GetCurrentThread())); + } + } + + return ResultSuccess(); + } + + void KThread::SetBasePriority(s32 priority) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority); + + KScopedSchedulerLock sl; + + /* Change our base priority. 
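Note that the base priority is only a starting point: the priority a thread is actually scheduled with can be boosted by threads waiting on locks it holds, which is why RestorePriority() is called below to recompute the effective value. Priorities are numerically inverted (a smaller value means a higher priority), as RequestTerminate() further down also shows by raising a thread above every system thread via SetBasePriority(ams::svc::SystemThreadPriorityHighest - 1).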
*/ + this->base_priority = priority; + + /* Perform a priority restoration. */ + RestorePriority(this); + } + Result KThread::SetPriorityToIdle() { MESOSPHERE_ASSERT_THIS(); @@ -374,10 +713,31 @@ namespace ams::kern { } } + void KThread::WaitCancel() { + MESOSPHERE_ASSERT_THIS(); + + KScopedSchedulerLock sl; + + /* Check if we're waiting and cancellable. */ + if (this->GetState() == ThreadState_Waiting && this->cancellable) { + if (this->sleeping_queue != nullptr) { + this->sleeping_queue->WakeupThread(this); + this->wait_cancelled = true; + } else { + this->SetSyncedObject(nullptr, svc::ResultCancelled()); + this->SetState(ThreadState_Runnable); + this->wait_cancelled = false; + } + } else { + /* Otherwise, note that we cancelled a wait. */ + this->wait_cancelled = true; + } + } + void KThread::TrySuspend() { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); - MESOSPHERE_ASSERT(this->IsSuspended()); + MESOSPHERE_ASSERT(this->IsSuspendRequested()); /* Ensure that we have no waiters. */ if (this->GetNumKernelWaiters() > 0) { @@ -392,7 +752,7 @@ namespace ams::kern { void KThread::Suspend() { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); - MESOSPHERE_ASSERT(this->IsSuspended()); + MESOSPHERE_ASSERT(this->IsSuspendRequested()); /* Set our suspend flags in state. */ const auto old_state = this->thread_state; @@ -414,6 +774,108 @@ namespace ams::kern { KScheduler::OnThreadStateChanged(this, old_state); } + Result KThread::SetActivity(ams::svc::ThreadActivity activity) { + /* Lock ourselves. */ + KScopedLightLock lk(this->activity_pause_lock); + + /* Set the activity. */ + { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Verify our state. */ + const auto cur_state = this->GetState(); + R_UNLESS((cur_state == ThreadState_Waiting || cur_state == ThreadState_Runnable), svc::ResultInvalidState()); + + /* Either pause or resume. */ + if (activity == ams::svc::ThreadActivity_Paused) { + /* Verify that we're not suspended. */ + R_UNLESS(!this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState()); + + /* Suspend. */ + this->RequestSuspend(SuspendType_Thread); + } else { + MESOSPHERE_ASSERT(activity == ams::svc::ThreadActivity_Runnable); + + /* Verify that we're suspended. */ + R_UNLESS(this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState()); + + /* Resume. */ + this->Resume(SuspendType_Thread); + } + } + + /* If the thread is now paused, update the pinned waiter list. */ + if (activity == ams::svc::ThreadActivity_Paused) { + bool thread_is_pinned = false; + bool thread_is_current; + do { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Don't do any further management if our termination has been requested. */ + R_SUCCEED_IF(this->IsTerminationRequested()); + + /* Check whether the thread is pinned. */ + if (this->GetStackParameters().is_pinned) { + /* Verify that the current thread isn't terminating. */ + R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested()); + + /* Note that the thread was pinned and not current. */ + thread_is_pinned = true; + thread_is_current = false; + + /* Wait until the thread isn't pinned any more. */ + this->pinned_waiter_list.push_back(GetCurrentThread()); + GetCurrentThread().SetState(ThreadState_Waiting); + } else { + /* Check if the thread is currently running. */ + /* If it is, we'll need to retry. 
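We cannot pause a thread out from under a core that is still executing it, so the loop releases the scheduler lock at the end of this scope, gives the target a chance to be switched out, and then re-checks; only once the thread is off-core (or parked on the pinned waiter list above) does the activity change take effect.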
*/ + thread_is_current = false; + + for (auto i = 0; i < static_cast(cpu::NumCores); ++i) { + if (Kernel::GetCurrentContext(i).current_thread == this) { + thread_is_current = true; + break; + } + } + } + } while (thread_is_current); + + /* If the thread was pinned, it no longer is, and we should remove the current thread from our waiter list. */ + if (thread_is_pinned) { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Remove from the list. */ + this->pinned_waiter_list.erase(this->pinned_waiter_list.iterator_to(GetCurrentThread())); + } + } + + return ResultSuccess(); + } + + Result KThread::GetThreadContext3(ams::svc::ThreadContext *out) { + /* Lock ourselves. */ + KScopedLightLock lk(this->activity_pause_lock); + + /* Get the context. */ + { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* Verify that we're suspended. */ + R_UNLESS(this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState()); + + /* If we're not terminating, get the thread's user context. */ + if (!this->IsTerminationRequested()) { + GetUserContext(out, this); + } + } + + return ResultSuccess(); + } + void KThread::AddWaiterImpl(KThread *thread) { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); @@ -467,8 +929,8 @@ namespace ams::kern { } /* Ensure we don't violate condition variable red black tree invariants. */ - if (auto *cond_var = thread->GetConditionVariable(); cond_var != nullptr) { - cond_var->BeforeUpdatePriority(thread); + if (auto *cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { + BeforeUpdatePriority(cv_tree, thread); } /* Change the priority. */ @@ -476,8 +938,8 @@ namespace ams::kern { thread->SetPriority(new_priority); /* Restore the condition variable, if relevant. */ - if (auto *cond_var = thread->GetConditionVariable(); cond_var != nullptr) { - cond_var->AfterUpdatePriority(thread); + if (auto *cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) { + AfterUpdatePriority(cv_tree, thread); } /* Update the scheduler. */ @@ -586,11 +1048,132 @@ namespace ams::kern { void KThread::Exit() { MESOSPHERE_ASSERT_THIS(); - MESOSPHERE_UNIMPLEMENTED(); + MESOSPHERE_ASSERT(this == GetCurrentThreadPointer()); + + /* Call the debug callback. */ + KDebug::OnExitThread(this); + + /* Release the thread resource hint from parent. */ + if (this->parent != nullptr) { + this->parent->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 0, 1); + this->resource_limit_release_hint = true; + } + + /* Perform termination. */ + { + KScopedSchedulerLock sl; + + /* Disallow all suspension. */ + this->suspend_allowed_flags = 0; + + /* Start termination. */ + this->StartTermination(); + + /* Register the thread as a work task. */ + KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this); + } MESOSPHERE_PANIC("KThread::Exit() would return"); } + void KThread::Terminate() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this != GetCurrentThreadPointer()); + + /* Request the thread terminate. */ + if (const auto new_state = this->RequestTerminate(); new_state != ThreadState_Terminated) { + /* If the thread isn't terminated, wait for it to terminate. 
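This works because a KThread is itself a KSynchronizationObject: StartTermination() above sets the signaled flag and notifies waiters, so a plain synchronization wait on the thread object (as below) returns once termination has taken effect.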
*/ + s32 index; + KSynchronizationObject *objects[] = { this }; + Kernel::GetSynchronization().Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite); + } + } + + KThread::ThreadState KThread::RequestTerminate() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this != GetCurrentThreadPointer()); + + KScopedSchedulerLock sl; + + /* Determine if this is the first termination request. */ + const bool first_request = [&] ALWAYS_INLINE_LAMBDA () -> bool { + /* Perform an atomic compare-and-swap from false to true. */ + bool expected = false; + do { + if (expected) { + return false; + } + } while (!this->termination_requested.compare_exchange_weak(expected, true)); + + return true; + }(); + + /* If this is the first request, start termination procedure. */ + if (first_request) { + /* If the thread is in initialized state, just change state to terminated. */ + if (this->GetState() == ThreadState_Initialized) { + this->thread_state = ThreadState_Terminated; + return ThreadState_Terminated; + } + + /* Register the terminating dpc. */ + this->RegisterDpc(DpcFlag_Terminating); + + /* If the thread is suspended, continue it. */ + if (this->IsSuspended()) { + this->suspend_allowed_flags = 0; + this->Continue(); + } + + /* Change the thread's priority to be higher than any system thread's. */ + if (this->GetBasePriority() >= ams::svc::SystemThreadPriorityHighest) { + this->SetBasePriority(ams::svc::SystemThreadPriorityHighest - 1); + } + + /* If the thread is runnable, send a termination interrupt to other cores. */ + if (this->GetState() == ThreadState_Runnable) { + if (const u64 core_mask = this->affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) { + Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask); + } + } + + /* Wake up the thread. */ + this->SetSyncedObject(nullptr, svc::ResultTerminationRequested()); + this->Wakeup(); + } + + return this->GetState(); + } + + Result KThread::Sleep(s64 timeout) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread()); + MESOSPHERE_ASSERT(this == GetCurrentThreadPointer()); + MESOSPHERE_ASSERT(timeout > 0); + + KHardwareTimer *timer; + { + /* Setup the scheduling lock and sleep. */ + KScopedSchedulerLockAndSleep slp(std::addressof(timer), this, timeout); + + /* Check if the thread should terminate. */ + if (this->IsTerminationRequested()) { + slp.CancelSleep(); + return svc::ResultTerminationRequested(); + } + + /* Mark the thread as waiting. */ + this->SetState(KThread::ThreadState_Waiting); + } + + /* The lock/sleep is done. */ + + /* Cancel the timer. */ + timer->CancelTask(this); + + return ResultSuccess(); + } + void KThread::SetState(ThreadState state) { MESOSPHERE_ASSERT_THIS(); @@ -607,4 +1190,66 @@ namespace ams::kern { return std::addressof(this->GetContext()); } + KThread *KThread::GetThreadFromId(u64 thread_id) { + /* Lock the list. */ + KThread::ListAccessor accessor; + const auto end = accessor.end(); + + /* Define helper object to find the thread. */ + class IdObjectHelper : public KAutoObjectWithListContainer::ListType::value_type { + private: + u64 id; + public: + constexpr explicit IdObjectHelper(u64 id) : id(id) { /* ... */ } + virtual u64 GetId() const override { return this->id; } + }; + + /* Find the object with the right id. */ + const auto it = accessor.find(IdObjectHelper(thread_id)); + + /* Check to make sure we found the thread. */ + if (it == end) { + return nullptr; + } + + /* Get the thread. 
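The cast back to KThread is safe because everything in this list accessor is a thread; the IdObjectHelper defined above exists only because find() wants a key object of the container's value type, so a throwaway object whose GetId() returns the id being searched for is used purely for comparison.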
*/ + KThread *thread = static_cast<KThread *>(std::addressof(*it)); + + /* Open the thread. */ + if (AMS_LIKELY(thread->Open())) { + MESOSPHERE_ASSERT(thread->GetId() == thread_id); + return thread; + } + + /* We failed to find the thread. */ + return nullptr; + } + + Result KThread::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) { + /* Lock the list. */ + KThread::ListAccessor accessor; + const auto end = accessor.end(); + + /* Iterate over the list. */ + s32 count = 0; + for (auto it = accessor.begin(); it != end; ++it) { + /* If we're within array bounds, write the id. */ + if (count < max_out_count) { + /* Get the thread id. */ + KThread *thread = static_cast<KThread *>(std::addressof(*it)); + const u64 id = thread->GetId(); + + /* Copy the id to userland. */ + R_TRY(out_thread_ids.CopyArrayElementFrom(std::addressof(id), count)); + } + + /* Increment the count. */ + ++count; + } + + /* We successfully iterated the list. */ + *out_num_threads = count; + return ResultSuccess(); + } + } diff --git a/libraries/libmesosphere/source/kern_k_transfer_memory.cpp b/libraries/libmesosphere/source/kern_k_transfer_memory.cpp new file mode 100644 index 000000000..39a3e197d --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_transfer_memory.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <mesosphere.hpp> + +namespace ams::kern { + + Result KTransferMemory::Initialize(KProcessAddress addr, size_t size, ams::svc::MemoryPermission own_perm) { + MESOSPHERE_ASSERT_THIS(); + + /* Set members. */ + this->owner = GetCurrentProcessPointer(); + + /* Initialize the page group. */ + auto &page_table = this->owner->GetPageTable(); + new (GetPointer(this->page_group)) KPageGroup(page_table.GetBlockInfoManager()); + + /* Ensure that our page group's state is valid on exit. */ + auto pg_guard = SCOPE_GUARD { GetReference(this->page_group).~KPageGroup(); }; + + /* Lock the memory. */ + R_TRY(page_table.LockForTransferMemory(GetPointer(this->page_group), addr, size, ConvertToKMemoryPermission(own_perm))); + + /* Set remaining tracking members. */ + this->owner->Open(); + this->owner_perm = own_perm; + this->address = addr; + this->is_initialized = true; + this->is_mapped = false; + + /* We succeeded. */ + pg_guard.Cancel(); + return ResultSuccess(); + } + + void KTransferMemory::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + /* Unlock. */ + if (!this->is_mapped) { + const size_t size = GetReference(this->page_group).GetNumPages() * PageSize; + MESOSPHERE_R_ABORT_UNLESS(this->owner->GetPageTable().UnlockForTransferMemory(this->address, size, GetReference(this->page_group))); + } + + /* Close the page group. */ + GetReference(this->page_group).Close(); + GetReference(this->page_group).Finalize(); + + /* Perform inherited finalization.
*/ + KAutoObjectWithSlabHeapAndContainer::Finalize(); + } + + void KTransferMemory::PostDestroy(uintptr_t arg) { + KProcess *owner = reinterpret_cast(arg); + owner->ReleaseResource(ams::svc::LimitableResource_TransferMemoryCountMax, 1); + owner->Close(); + } + + Result KTransferMemory::Map(KProcessAddress address, size_t size, ams::svc::MemoryPermission map_perm) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Validate the permission. */ + R_UNLESS(this->owner_perm == map_perm, svc::ResultInvalidState()); + + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Ensure we're not already mapped. */ + R_UNLESS(!this->is_mapped, svc::ResultInvalidState()); + + /* Map the memory. */ + const KMemoryState state = (this->owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered; + R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(this->page_group), state, KMemoryPermission_UserReadWrite)); + + /* Mark ourselves as mapped. */ + this->is_mapped = true; + + return ResultSuccess(); + } + + Result KTransferMemory::Unmap(KProcessAddress address, size_t size) { + MESOSPHERE_ASSERT_THIS(); + + /* Validate the size. */ + R_UNLESS(GetReference(this->page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize()); + + /* Lock ourselves. */ + KScopedLightLock lk(this->lock); + + /* Unmap the memory. */ + const KMemoryState state = (this->owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered; + R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(this->page_group), state)); + + /* Mark ourselves as unmapped. */ + MESOSPHERE_ASSERT(this->is_mapped); + this->is_mapped = false; + + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_wait_object.cpp b/libraries/libmesosphere/source/kern_k_wait_object.cpp index dc7ef1f13..6055cf17e 100644 --- a/libraries/libmesosphere/source/kern_k_wait_object.cpp +++ b/libraries/libmesosphere/source/kern_k_wait_object.cpp @@ -18,7 +18,86 @@ namespace ams::kern { void KWaitObject::OnTimer() { - MESOSPHERE_UNIMPLEMENTED(); + MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread()); + + /* Wake up all the waiting threads. */ + Entry *entry = std::addressof(this->root); + while (true) { + /* Get the next thread. */ + KThread *thread = entry->GetNext(); + if (thread == nullptr) { + break; + } + + /* Wake it up. */ + thread->Wakeup(); + + /* Advance. */ + entry = std::addressof(thread->GetSleepingQueueEntry()); + } + } + + Result KWaitObject::Synchronize(s64 timeout) { + /* Perform the wait. */ + KHardwareTimer *timer = nullptr; + KThread *cur_thread = GetCurrentThreadPointer(); + { + KScopedSchedulerLock sl; + + /* Check that the thread isn't terminating. */ + R_UNLESS(!cur_thread->IsTerminationRequested(), svc::ResultTerminationRequested()); + + /* Verify that nothing else is already waiting on the object. */ + if (timeout > 0) { + R_UNLESS(!this->timer_used, svc::ResultBusy()); + } + + /* Check that we're not already in use. */ + if (timeout >= 0) { + /* Verify the timer isn't already in use. */ + R_UNLESS(!this->timer_used, svc::ResultBusy()); + } + + /* If we need to, register our timeout. */ + if (timeout > 0) { + /* Mark that we're using the timer. */ + this->timer_used = true; + + /* Use the timer. 
*/ + timer = std::addressof(Kernel::GetHardwareTimer()); + timer->RegisterAbsoluteTask(this, timeout); + } + + if (timeout == 0) { + /* If we're timed out immediately, just wake up the thread. */ + this->OnTimer(); + } else { + /* Otherwise, sleep until the timeout occurs. */ + this->Enqueue(cur_thread); + cur_thread->SetState(KThread::ThreadState_Waiting); + cur_thread->SetSyncedObject(nullptr, svc::ResultTimedOut()); + } + } + + /* Cleanup as necessary. */ + { + KScopedSchedulerLock sl; + + /* Remove from the timer. */ + if (timeout > 0) { + MESOSPHERE_ASSERT(this->timer_used); + MESOSPHERE_ASSERT(timer != nullptr); + timer->CancelTask(this); + this->timer_used = false; + } + + /* Remove the thread from our queue. */ + if (timeout != 0) { + this->Remove(cur_thread); + } + } + + return ResultSuccess(); } } diff --git a/libraries/libmesosphere/source/kern_k_writable_event.cpp b/libraries/libmesosphere/source/kern_k_writable_event.cpp new file mode 100644 index 000000000..76950456a --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_writable_event.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void KWritableEvent::Initialize(KEvent *p) { + /* Set parent, open a reference to the readable event. */ + this->parent = p; + this->parent->GetReadableEvent().Open(); + } + + Result KWritableEvent::Signal() { + return this->parent->GetReadableEvent().Signal(); + } + + Result KWritableEvent::Clear() { + return this->parent->GetReadableEvent().Clear(); + } + + void KWritableEvent::Destroy() { + /* Close our references. */ + this->parent->GetReadableEvent().Close(); + this->parent->Close(); + } + +} diff --git a/libraries/libmesosphere/source/kern_kernel.cpp b/libraries/libmesosphere/source/kern_kernel.cpp index 5c1c5a676..f9aa6f747 100644 --- a/libraries/libmesosphere/source/kern_kernel.cpp +++ b/libraries/libmesosphere/source/kern_kernel.cpp @@ -116,11 +116,13 @@ namespace ams::kern { } void Kernel::PrintLayout() { + const auto target_fw = kern::GetTargetFirmware(); + /* Print out the kernel version. */ - /* TODO: target firmware, if we support that? 
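   (For reference: the Target Firmware line added below decodes a packed version value. The shifts imply a
   (major << 24) | (minor << 16) | (micro << 8) layout, so, for example, firmware 10.0.4 would be encoded as
   0x0A000400 and printed as "Target Firmware: 10.0.4".)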
*/ MESOSPHERE_LOG("Horizon Kernel (Mesosphere)\n"); MESOSPHERE_LOG("Built: %s %s\n", __DATE__, __TIME__); MESOSPHERE_LOG("Atmosphere version: %d.%d.%d-%s\n", ATMOSPHERE_RELEASE_VERSION, ATMOSPHERE_GIT_REVISION); + MESOSPHERE_LOG("Target Firmware: %d.%d.%d\n", (target_fw >> 24) & 0xFF, (target_fw >> 16) & 0xFF, (target_fw >> 8) & 0xFF); MESOSPHERE_LOG("Supported OS version: %d.%d.%d\n", ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR, ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR, ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO); MESOSPHERE_LOG("\n"); diff --git a/libraries/libmesosphere/source/kern_panic.cpp b/libraries/libmesosphere/source/kern_panic.cpp index a60e88093..fc4322e7d 100644 --- a/libraries/libmesosphere/source/kern_panic.cpp +++ b/libraries/libmesosphere/source/kern_panic.cpp @@ -120,6 +120,9 @@ namespace ams::kern { ::std::va_list vl; va_start(vl, format); MESOSPHERE_RELEASE_LOG("Core[%d]: Kernel Panic at %s:%d\n", GetCurrentCoreId(), file, line); + if (KProcess *cur_process = GetCurrentProcessPointer(); cur_process != nullptr) { + MESOSPHERE_RELEASE_LOG("Core[%d]: Current Process: %s\n", GetCurrentCoreId(), cur_process->GetName()); + } MESOSPHERE_RELEASE_VLOG(format, vl); MESOSPHERE_RELEASE_LOG("\n"); va_end(vl); diff --git a/libraries/libmesosphere/source/svc/kern_svc_activity.cpp b/libraries/libmesosphere/source/svc/kern_svc_activity.cpp index 68a9e5e0a..fe1ba168f 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_activity.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_activity.cpp @@ -21,28 +21,81 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidThreadActivity(ams::svc::ThreadActivity thread_activity) { + switch (thread_activity) { + case ams::svc::ThreadActivity_Runnable: + case ams::svc::ThreadActivity_Paused: + return true; + default: + return false; + } + } + constexpr bool IsValidProcessActivity(ams::svc::ProcessActivity process_activity) { + switch (process_activity) { + case ams::svc::ProcessActivity_Runnable: + case ams::svc::ProcessActivity_Paused: + return true; + default: + return false; + } + } + + Result SetThreadActivity(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) { + /* Validate the activity. */ + R_UNLESS(IsValidThreadActivity(thread_activity), svc::ResultInvalidEnumValue()); + + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Check that the activity is being set on a non-current thread for the current process. */ + R_UNLESS(thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultInvalidHandle()); + R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(), svc::ResultBusy()); + + /* Set the activity. */ + R_TRY(thread->SetActivity(thread_activity)); + + return ResultSuccess(); + } + + Result SetProcessActivity(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) { + /* Validate the activity. */ + R_UNLESS(IsValidProcessActivity(process_activity), svc::ResultInvalidEnumValue()); + + /* Get the process from its handle. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Check that the activity isn't being set on the current process. */ + R_UNLESS(process.GetPointerUnsafe() != GetCurrentProcessPointer(), svc::ResultBusy()); + + /* Set the activity. 
*/ + R_TRY(process->SetActivity(process_activity)); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result SetThreadActivity64(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) { - MESOSPHERE_PANIC("Stubbed SvcSetThreadActivity64 was called."); + return SetThreadActivity(thread_handle, thread_activity); } Result SetProcessActivity64(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) { - MESOSPHERE_PANIC("Stubbed SvcSetProcessActivity64 was called."); + return SetProcessActivity(process_handle, process_activity); } /* ============================= 64From32 ABI ============================= */ Result SetThreadActivity64From32(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) { - MESOSPHERE_PANIC("Stubbed SvcSetThreadActivity64From32 was called."); + return SetThreadActivity(thread_handle, thread_activity); } Result SetProcessActivity64From32(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) { - MESOSPHERE_PANIC("Stubbed SvcSetProcessActivity64From32 was called."); + return SetProcessActivity(process_handle, process_activity); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp b/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp index 3943f0421..0fdb4780e 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp @@ -21,28 +21,86 @@ namespace ams::kern::svc { namespace { + constexpr bool IsKernelAddress(uintptr_t address) { + return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; + } + constexpr bool IsValidSignalType(ams::svc::SignalType type) { + switch (type) { + case ams::svc::SignalType_Signal: + case ams::svc::SignalType_SignalAndIncrementIfEqual: + case ams::svc::SignalType_SignalAndModifyByWaitingCountIfEqual: + return true; + default: + return false; + } + } + + constexpr bool IsValidArbitrationType(ams::svc::ArbitrationType type) { + switch (type) { + case ams::svc::ArbitrationType_WaitIfLessThan: + case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan: + case ams::svc::ArbitrationType_WaitIfEqual: + return true; + default: + return false; + } + } + + Result WaitForAddress(uintptr_t address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) { + /* Validate input. */ + R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory()); + R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress()); + R_UNLESS(IsValidArbitrationType(arb_type), svc::ResultInvalidEnumValue()); + + /* Convert timeout from nanoseconds to ticks. */ + s64 timeout; + if (timeout_ns > 0) { + const ams::svc::Tick offset_tick(TimeSpan::FromNanoSeconds(timeout_ns)); + if (AMS_LIKELY(offset_tick > 0)) { + timeout = KHardwareTimer::GetTick() + offset_tick + 2; + if (AMS_UNLIKELY(timeout <= 0)) { + timeout = std::numeric_limits::max(); + } + } else { + timeout = std::numeric_limits::max(); + } + } else { + timeout = timeout_ns; + } + + return GetCurrentProcess().WaitAddressArbiter(address, arb_type, value, timeout); + } + + Result SignalToAddress(uintptr_t address, ams::svc::SignalType signal_type, int32_t value, int32_t count) { + /* Validate input. 
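   As a usage sketch (the user-mode wrapper names here are illustrative, not part of this diff), the pair of
   SVCs implemented in this file supports a futex-like pattern:

       svcWaitForAddress(addr, ArbitrationType_WaitIfEqual, expected, timeout_ns);   // sleeps if *addr == expected
       svcSignalToAddress(addr, SignalType_Signal, 0, 1);                            // wakes one waiter on addr

   Note that a positive timeout_ns is converted above into an absolute tick deadline (the current tick plus the
   converted tick count plus 2), while zero and negative timeouts are passed through unchanged.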
*/ + R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory()); + R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress()); + R_UNLESS(IsValidSignalType(signal_type), svc::ResultInvalidEnumValue()); + + return GetCurrentProcess().SignalAddressArbiter(address, signal_type, value, count); + } } /* ============================= 64 ABI ============================= */ Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcWaitForAddress64 was called."); + return WaitForAddress(address, arb_type, value, timeout_ns); } Result SignalToAddress64(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) { - MESOSPHERE_PANIC("Stubbed SvcSignalToAddress64 was called."); + return SignalToAddress(address, signal_type, value, count); } /* ============================= 64From32 ABI ============================= */ Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int32_t value, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcWaitForAddress64From32 was called."); + return WaitForAddress(address, arb_type, value, timeout_ns); } Result SignalToAddress64From32(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) { - MESOSPHERE_PANIC("Stubbed SvcSignalToAddress64From32 was called."); + return SignalToAddress(address, signal_type, value, count); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_address_translation.cpp b/libraries/libmesosphere/source/svc/kern_svc_address_translation.cpp index ce2eb87ea..93b43565c 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_address_translation.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_address_translation.cpp @@ -21,28 +21,143 @@ namespace ams::kern::svc { namespace { + Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out_info, uintptr_t address) { + /* NOTE: In 10.0.0, Nintendo stubbed this SVC. Should we do so? */ + /* R_UNLESS(GetTargetFirmware() < TargetFirmware_10_0_0, svc::ResultInvalidCurrentMemory()); */ + /* Get reference to page table. */ + auto &pt = GetCurrentProcess().GetPageTable(); + + /* Check that the address is valid. */ + R_UNLESS(pt.Contains(address, 1), svc::ResultInvalidCurrentMemory()); + + /* Query the physical mapping. */ + R_TRY(pt.QueryPhysicalAddress(out_info, address)); + + return ResultSuccess(); + } + + Result QueryIoMapping(uintptr_t *out_address, size_t *out_size, uint64_t phys_addr, size_t size) { + /* Declare variables we'll populate. */ + KProcessAddress found_address = Null; + size_t found_size = 0; + + /* Get reference to page table. */ + auto &pt = GetCurrentProcess().GetPageTable(); + + /* Check whether the address is aligned. */ + const bool aligned = util::IsAligned(phys_addr, PageSize); + + auto QueryIoMappingFromPageTable = [&] ALWAYS_INLINE_LAMBDA (uint64_t phys_addr, size_t size) -> Result { + /* The size must be non-zero. */ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + + /* The request must not overflow. */ + R_UNLESS((phys_addr < phys_addr + size), svc::ResultNotFound()); + + /* Query the mapping. */ + R_TRY(pt.QueryIoMapping(std::addressof(found_address), phys_addr, size)); + + /* Use the size as the found size. */ + found_size = size; + + return ResultSuccess(); + }; + + if (aligned) { + /* Query the input. 
*/ + R_TRY(QueryIoMappingFromPageTable(phys_addr, size)); + } else { + if (kern::GetTargetFirmware() < TargetFirmware_8_0_0 && phys_addr >= PageSize) { + /* Query the aligned-down page. */ + const size_t offset = phys_addr & (PageSize - 1); + R_TRY(QueryIoMappingFromPageTable(phys_addr - offset, size + offset)); + + /* Adjust the output address. */ + found_address += offset; + } else { + /* Newer kernel only allows unaligned addresses when they're special enum members. */ + R_UNLESS(phys_addr < PageSize, svc::ResultNotFound()); + + /* Try to find the memory region. */ + const KMemoryRegion *region; + switch (static_cast(phys_addr)) { + case ams::svc::MemoryRegionType_KernelTraceBuffer: + region = KMemoryLayout::TryGetKernelTraceBufferRegion(); + break; + case ams::svc::MemoryRegionType_OnMemoryBootImage: + region = KMemoryLayout::TryGetOnMemoryBootImageRegion(); + break; + case ams::svc::MemoryRegionType_DTB: + region = KMemoryLayout::TryGetDTBRegion(); + break; + default: + region = nullptr; + break; + } + + /* Ensure that we found the region. */ + R_UNLESS(region != nullptr, svc::ResultNotFound()); + + R_TRY(pt.QueryStaticMapping(std::addressof(found_address), region->GetAddress(), region->GetSize())); + found_size = region->GetSize(); + } + } + + /* We succeeded. */ + MESOSPHERE_ASSERT(found_address != Null); + MESOSPHERE_ASSERT(found_size != 0); + if (out_address != nullptr) { + *out_address = GetInteger(found_address); + } + if (out_size != nullptr) { + *out_size = found_size; + } + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result QueryPhysicalAddress64(ams::svc::lp64::PhysicalMemoryInfo *out_info, ams::svc::Address address) { - MESOSPHERE_PANIC("Stubbed SvcQueryPhysicalAddress64 was called."); + return QueryPhysicalAddress(out_info, address); } - Result QueryIoMapping64(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcQueryIoMapping64 was called."); + Result QueryIoMapping64(ams::svc::Address *out_address, ams::svc::Size *out_size, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) { + static_assert(sizeof(*out_address) == sizeof(uintptr_t)); + static_assert(sizeof(*out_size) == sizeof(size_t)); + return QueryIoMapping(reinterpret_cast(out_address), reinterpret_cast(out_size), physical_address, size); + } + + Result LegacyQueryIoMapping64(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) { + static_assert(sizeof(*out_address) == sizeof(uintptr_t)); + return QueryIoMapping(reinterpret_cast(out_address), nullptr, physical_address, size); } /* ============================= 64From32 ABI ============================= */ Result QueryPhysicalAddress64From32(ams::svc::ilp32::PhysicalMemoryInfo *out_info, ams::svc::Address address) { - MESOSPHERE_PANIC("Stubbed SvcQueryPhysicalAddress64From32 was called."); + ams::svc::PhysicalMemoryInfo info = {}; + R_TRY(QueryPhysicalAddress(std::addressof(info), address)); + + *out_info = { + .physical_address = info.physical_address, + .virtual_address = static_cast(info.virtual_address), + .size = static_cast(info.size), + }; + return ResultSuccess(); } - Result QueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcQueryIoMapping64From32 was called."); + Result QueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::Size *out_size, 
ams::svc::PhysicalAddress physical_address, ams::svc::Size size) { + static_assert(sizeof(*out_address) == sizeof(uintptr_t)); + static_assert(sizeof(*out_size) == sizeof(size_t)); + return QueryIoMapping(reinterpret_cast(out_address), reinterpret_cast(out_size), physical_address, size); + } + + Result LegacyQueryIoMapping64From32(ams::svc::Address *out_address, ams::svc::PhysicalAddress physical_address, ams::svc::Size size) { + static_assert(sizeof(*out_address) == sizeof(uintptr_t)); + return QueryIoMapping(reinterpret_cast(out_address), nullptr, physical_address, size); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_cache.cpp b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp index 9ba6d2463..8deb3f595 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_cache.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_cache.cpp @@ -21,52 +21,198 @@ namespace ams::kern::svc { namespace { + class CacheOperation { + public: + virtual void Operate(void *address, size_t size) const = 0; + }; + Result DoProcessCacheOperation(const CacheOperation &operation, KProcessPageTable &page_table, uintptr_t address, size_t size) { + /* Determine aligned extents. */ + const uintptr_t aligned_start = util::AlignDown(address, PageSize); + const uintptr_t aligned_end = util::AlignUp(address + size, PageSize); + const size_t num_pages = (aligned_end - aligned_start) / PageSize; + + /* Create a page group for the process's memory. */ + KPageGroup pg(page_table.GetBlockInfoManager()); + + /* Make and open the page group. */ + R_TRY(page_table.MakeAndOpenPageGroup(std::addressof(pg), + aligned_start, num_pages, + KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, + KMemoryPermission_UserRead, KMemoryPermission_UserRead, + KMemoryAttribute_Uncached, KMemoryAttribute_None)); + + /* Ensure we don't leak references to the pages we're operating on. */ + ON_SCOPE_EXIT { pg.Close(); }; + + /* Operate on all the blocks. */ + uintptr_t cur_address = aligned_start; + size_t remaining = size; + for (const auto &block : pg) { + /* Get the block extents. */ + KVirtualAddress operate_address = block.GetAddress(); + size_t operate_size = block.GetSize(); + + /* Adjust to remain within range. */ + if (cur_address < address) { + operate_address += (address - cur_address); + } + if (operate_size > remaining) { + operate_size = remaining; + } + + /* Operate. */ + operation.Operate(GetVoidPointer(operate_address), operate_size); + + /* Advance. */ + cur_address += block.GetSize(); + remaining -= operate_size; + } + MESOSPHERE_ASSERT(remaining == 0); + + return ResultSuccess(); + } + + void FlushEntireDataCache() { + /* Flushing cache takes up to 1ms, so determine our minimum end tick. */ + const s64 timeout = KHardwareTimer::GetTick() + ams::svc::Tick(TimeSpan::FromMilliSeconds(1)); + + /* Flush the entire data cache. */ + cpu::FlushEntireDataCache(); + + /* Wait for 1ms to have passed. */ + while (KHardwareTimer::GetTick() < timeout) { + cpu::Yield(); + } + } + + Result FlushDataCache(uintptr_t address, size_t size) { + /* Succeed if there's nothing to do. */ + R_SUCCEED_IF(size == 0); + + /* Validate that the region is within range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Flush the cache. 
*/ + R_TRY(cpu::FlushDataCache(reinterpret_cast(address), size)); + + return ResultSuccess(); + } + + Result InvalidateProcessDataCache(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { + /* Validate address/size. */ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS(address == static_cast(address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(size == static_cast(size), svc::ResultInvalidCurrentMemory()); + + /* Get the process from its handle. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Invalidate the cache. */ + R_TRY(process->GetPageTable().InvalidateProcessDataCache(address, size)); + + return ResultSuccess(); + } + + Result StoreProcessDataCache(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { + /* Validate address/size. */ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS(address == static_cast(address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(size == static_cast(size), svc::ResultInvalidCurrentMemory()); + + /* Get the process from its handle. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Verify the region is within range. */ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Perform the operation. */ + if (process.GetPointerUnsafe() == GetCurrentProcessPointer()) { + return cpu::StoreDataCache(reinterpret_cast(address), size); + } else { + class StoreCacheOperation : public CacheOperation { + public: + virtual void Operate(void *address, size_t size) const override { cpu::StoreDataCache(address, size); } + } operation; + + return DoProcessCacheOperation(operation, page_table, address, size); + } + } + + Result FlushProcessDataCache(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { + /* Validate address/size. */ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS(address == static_cast(address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(size == static_cast(size), svc::ResultInvalidCurrentMemory()); + + /* Get the process from its handle. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Verify the region is within range. */ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Perform the operation. 
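   (When the target handle refers to the current process, the user-mode address is directly accessible and the
   cache maintenance below runs on it as-is; for any other process, the pages are first pinned with a KPageGroup
   and the operation is applied block-by-block through DoProcessCacheOperation above.)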
*/ + if (process.GetPointerUnsafe() == GetCurrentProcessPointer()) { + return cpu::FlushDataCache(reinterpret_cast(address), size); + } else { + class FlushCacheOperation : public CacheOperation { + public: + virtual void Operate(void *address, size_t size) const override { cpu::FlushDataCache(address, size); } + } operation; + + return DoProcessCacheOperation(operation, page_table, address, size); + } + } } /* ============================= 64 ABI ============================= */ void FlushEntireDataCache64() { - MESOSPHERE_PANIC("Stubbed SvcFlushEntireDataCache64 was called."); + return FlushEntireDataCache(); } Result FlushDataCache64(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcFlushDataCache64 was called."); + return FlushDataCache(address, size); } Result InvalidateProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcInvalidateProcessDataCache64 was called."); + return InvalidateProcessDataCache(process_handle, address, size); } Result StoreProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcStoreProcessDataCache64 was called."); + return StoreProcessDataCache(process_handle, address, size); } Result FlushProcessDataCache64(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcFlushProcessDataCache64 was called."); + return FlushProcessDataCache(process_handle, address, size); } /* ============================= 64From32 ABI ============================= */ void FlushEntireDataCache64From32() { - MESOSPHERE_PANIC("Stubbed SvcFlushEntireDataCache64From32 was called."); + return FlushEntireDataCache(); } Result FlushDataCache64From32(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcFlushDataCache64From32 was called."); + return FlushDataCache(address, size); } Result InvalidateProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcInvalidateProcessDataCache64From32 was called."); + return InvalidateProcessDataCache(process_handle, address, size); } Result StoreProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcStoreProcessDataCache64From32 was called."); + return StoreProcessDataCache(process_handle, address, size); } Result FlushProcessDataCache64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcFlushProcessDataCache64From32 was called."); + return FlushProcessDataCache(process_handle, address, size); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp index e47135698..f217950da 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp @@ -21,28 +21,143 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidMapCodeMemoryPermission(ams::svc::MemoryPermission perm) { + return perm == ams::svc::MemoryPermission_ReadWrite; + } + constexpr bool IsValidMapToOwnerCodeMemoryPermission(ams::svc::MemoryPermission perm) { + return perm == ams::svc::MemoryPermission_Read || perm == ams::svc::MemoryPermission_ReadExecute; + } + + constexpr bool IsValidUnmapCodeMemoryPermission(ams::svc::MemoryPermission perm) { + return perm == ams::svc::MemoryPermission_None; + } + + constexpr bool 
IsValidUnmapFromOwnerCodeMemoryPermission(ams::svc::MemoryPermission perm) { + return perm == ams::svc::MemoryPermission_None; + } + + Result CreateCodeMemory(ams::svc::Handle *out, uintptr_t address, size_t size) { + /* Validate address / size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Create the code memory. */ + KCodeMemory *code_mem = KCodeMemory::Create(); + R_UNLESS(code_mem != nullptr, svc::ResultOutOfResource()); + ON_SCOPE_EXIT { code_mem->Close(); }; + + /* Verify that the region is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Initialize the code memory. */ + R_TRY(code_mem->Initialize(address, size)); + + /* Register the code memory. */ + R_TRY(KCodeMemory::Register(code_mem)); + + /* Add the code memory to the handle table. */ + R_TRY(GetCurrentProcess().GetHandleTable().Add(out, code_mem)); + + return ResultSuccess(); + } + + Result ControlCodeMemory(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) { + /* Validate the address / size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(address == static_cast(address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(size == static_cast(size), svc::ResultInvalidCurrentMemory()); + + /* Get the code memory from its handle. */ + KScopedAutoObject code_mem = GetCurrentProcess().GetHandleTable().GetObject(code_memory_handle); + R_UNLESS(code_mem.IsNotNull(), svc::ResultInvalidHandle()); + + /* NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process. */ + /* This enables homebrew usage of these SVCs for JIT. */ + /* R_UNLESS(code_mem->GetOwner() != GetCurrentProcessPointer(), svc::ResultInvalidHandle()); */ + + /* Perform the operation. */ + switch (operation) { + case ams::svc::CodeMemoryOperation_Map: + { + /* Check that the region is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_CodeOut), svc::ResultInvalidMemoryRegion()); + + /* Check the memory permission. */ + R_UNLESS(IsValidMapCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission()); + + /* Map the memory. */ + R_TRY(code_mem->Map(address, size)); + } + break; + case ams::svc::CodeMemoryOperation_Unmap: + { + /* Check that the region is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_CodeOut), svc::ResultInvalidMemoryRegion()); + + /* Check the memory permission. */ + R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission()); + + /* Unmap the memory. */ + R_TRY(code_mem->Unmap(address, size)); + } + break; + case ams::svc::CodeMemoryOperation_MapToOwner: + { + /* Check that the region is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_GeneratedCode), svc::ResultInvalidMemoryRegion()); + + /* Check the memory permission. 
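   A rough sketch of the homebrew JIT flow this extension enables (the user-mode wrapper names are illustrative;
   the operation and permission values come from the checks in this function):

       svcCreateCodeMemory(&code_handle, src, size);
       svcControlCodeMemory(code_handle, CodeMemoryOperation_Map,        rw_addr, size, MemoryPermission_ReadWrite);
       svcControlCodeMemory(code_handle, CodeMemoryOperation_MapToOwner, rx_addr, size, MemoryPermission_ReadExecute);
       // generate code through the writable alias at rw_addr, then execute it through rx_addr

   Because Atmosphère disables the check that the code memory must be owned by a different process, a single
   homebrew process can hold both aliases itself instead of needing a separate owner process.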
*/ + R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission()); + + /* Map the memory to its owner. */ + R_TRY(code_mem->MapToOwner(address, size, perm)); + } + break; + case ams::svc::CodeMemoryOperation_UnmapFromOwner: + { + /* Check that the region is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_GeneratedCode), svc::ResultInvalidMemoryRegion()); + + /* Check the memory permission. */ + R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission()); + + /* Unmap the memory from its owner. */ + R_TRY(code_mem->UnmapFromOwner(address, size)); + } + break; + default: + return svc::ResultInvalidEnumValue(); + } + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result CreateCodeMemory64(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcCreateCodeMemory64 was called."); + return CreateCodeMemory(out_handle, address, size); } Result ControlCodeMemory64(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) { - MESOSPHERE_PANIC("Stubbed SvcControlCodeMemory64 was called."); + return ControlCodeMemory(code_memory_handle, operation, address, size, perm); } /* ============================= 64From32 ABI ============================= */ Result CreateCodeMemory64From32(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcCreateCodeMemory64From32 was called."); + return CreateCodeMemory(out_handle, address, size); } Result ControlCodeMemory64From32(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) { - MESOSPHERE_PANIC("Stubbed SvcControlCodeMemory64From32 was called."); + return ControlCodeMemory(code_memory_handle, operation, address, size, perm); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp b/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp index 8307dccae..660149aec 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp @@ -21,28 +21,60 @@ namespace ams::kern::svc { namespace { + constexpr bool IsKernelAddress(uintptr_t address) { + return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; + } + Result WaitProcessWideKeyAtomic(uintptr_t address, uintptr_t cv_key, uint32_t tag, int64_t timeout_ns) { + /* Validate input. */ + R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory()); + R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress()); + + /* Convert timeout from nanoseconds to ticks. */ + s64 timeout; + if (timeout_ns > 0) { + const ams::svc::Tick offset_tick(TimeSpan::FromNanoSeconds(timeout_ns)); + if (AMS_LIKELY(offset_tick > 0)) { + timeout = KHardwareTimer::GetTick() + offset_tick + 2; + if (AMS_UNLIKELY(timeout <= 0)) { + timeout = std::numeric_limits::max(); + } + } else { + timeout = std::numeric_limits::max(); + } + } else { + timeout = timeout_ns; + } + + /* Wait on the condition variable. 
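   Broadly, these two SVCs back the user-mode condition variable: the wait call atomically releases the mutex
   word at address (tagged with tag) and sleeps on cv_key, and SignalProcessWideKey below wakes up to count
   waiters sleeping on that key. The nanosecond timeout is converted to an absolute tick deadline in the same
   way as in the address arbiter above.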
*/ + return GetCurrentProcess().WaitConditionVariable(address, util::AlignDown(cv_key, sizeof(u32)), tag, timeout); + } + + void SignalProcessWideKey(uintptr_t cv_key, int32_t count) { + /* Signal the condition variable. */ + return GetCurrentProcess().SignalConditionVariable(util::AlignDown(cv_key, sizeof(u32)), count); + } } /* ============================= 64 ABI ============================= */ Result WaitProcessWideKeyAtomic64(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcWaitProcessWideKeyAtomic64 was called."); + return WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns); } void SignalProcessWideKey64(ams::svc::Address cv_key, int32_t count) { - MESOSPHERE_PANIC("Stubbed SvcSignalProcessWideKey64 was called."); + return SignalProcessWideKey(cv_key, count); } /* ============================= 64From32 ABI ============================= */ Result WaitProcessWideKeyAtomic64From32(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcWaitProcessWideKeyAtomic64From32 was called."); + return WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns); } void SignalProcessWideKey64From32(ams::svc::Address cv_key, int32_t count) { - MESOSPHERE_PANIC("Stubbed SvcSignalProcessWideKey64From32 was called."); + return SignalProcessWideKey(cv_key, count); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_debug.cpp b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp index c995bd015..846b749f6 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_debug.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_debug.cpp @@ -21,108 +21,477 @@ namespace ams::kern::svc { namespace { + constexpr inline int32_t MaximumDebuggableThreadCount = 0x60; + Result DebugActiveProcess(ams::svc::Handle *out_handle, uint64_t process_id) { + /* Get the process from its id. */ + KProcess *process = KProcess::GetProcessFromId(process_id); + R_UNLESS(process != nullptr, svc::ResultInvalidProcessId()); + + /* Close the reference we opened to the process on scope exit. */ + ON_SCOPE_EXIT { process->Close(); }; + + /* Check that the debugging is allowed. */ + if (!process->IsPermittedDebug()) { + R_UNLESS(GetCurrentProcess().CanForceDebug(), svc::ResultInvalidState()); + } + + /* Disallow debugging one's own processs, to prevent softlocks. */ + R_UNLESS(process != GetCurrentProcessPointer(), svc::ResultInvalidState()); + + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Create a new debug object. */ + KDebug *debug = KDebug::Create(); + R_UNLESS(debug != nullptr, svc::ResultOutOfResource()); + ON_SCOPE_EXIT { debug->Close(); }; + + /* Initialize the debug object. */ + debug->Initialize(); + + /* Register the debug object. */ + KDebug::Register(debug); + + /* Try to attach to the target process. */ + R_TRY(debug->Attach(process)); + + /* Add the new debug object to the handle table. */ + R_TRY(handle_table.Add(out_handle, debug)); + + return ResultSuccess(); + } + + Result BreakDebugProcess(ams::svc::Handle debug_handle) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Break the process. 
*/ + R_TRY(debug->BreakProcess()); + + return ResultSuccess(); + } + + Result TerminateDebugProcess(ams::svc::Handle debug_handle) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Terminate the process. */ + R_TRY(debug->TerminateProcess()); + + return ResultSuccess(); + } + + template + Result GetDebugEvent(KUserPointer out_info, ams::svc::Handle debug_handle) { + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Create and clear a new event info. */ + EventInfoType info; + std::memset(std::addressof(info), 0, sizeof(info)); + + /* Get the next info from the debug object. */ + R_TRY(debug->GetDebugEventInfo(std::addressof(info))); + + /* Copy the info out to the user. */ + R_TRY(out_info.CopyFrom(std::addressof(info))); + + return ResultSuccess(); + } + + Result ContinueDebugEventImpl(ams::svc::Handle debug_handle, uint32_t flags, const uint64_t *thread_ids, int32_t num_thread_ids) { + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Continue the event. */ + R_TRY(debug->ContinueDebug(flags, thread_ids, num_thread_ids)); + + return ResultSuccess(); + } + + Result ContinueDebugEvent(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer user_thread_ids, int32_t num_thread_ids) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + + /* Verify that the flags are valid. */ + R_UNLESS((flags | ams::svc::ContinueFlag_AllMask) == ams::svc::ContinueFlag_AllMask, svc::ResultInvalidEnumValue()); + + /* Verify that continue all and continue others flags are exclusive. */ + constexpr u32 AllAndOthersMask = ams::svc::ContinueFlag_ContinueAll | ams::svc::ContinueFlag_ContinueOthers; + R_UNLESS((flags & AllAndOthersMask) != AllAndOthersMask, svc::ResultInvalidEnumValue()); + + /* Verify that the number of thread ids is valid. */ + R_UNLESS((0 <= num_thread_ids && num_thread_ids <= MaximumDebuggableThreadCount), svc::ResultOutOfRange()); + + /* Copy the threads from userspace. */ + uint64_t thread_ids[MaximumDebuggableThreadCount]; + if (num_thread_ids > 0) { + R_TRY(user_thread_ids.CopyArrayTo(thread_ids, num_thread_ids)); + } + + /* Continue the event. */ + R_TRY(ContinueDebugEventImpl(debug_handle, flags, thread_ids, num_thread_ids)); + + return ResultSuccess(); + } + + Result LegacyContinueDebugEvent(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + + /* Verify that the flags are valid. */ + R_UNLESS((flags | ams::svc::ContinueFlag_AllMask) == ams::svc::ContinueFlag_AllMask, svc::ResultInvalidEnumValue()); + + /* Verify that continue all and continue others flags are exclusive. */ + constexpr u32 AllAndOthersMask = ams::svc::ContinueFlag_ContinueAll | ams::svc::ContinueFlag_ContinueOthers; + R_UNLESS((flags & AllAndOthersMask) != AllAndOthersMask, svc::ResultInvalidEnumValue()); + + /* Continue the event. 
*/ + R_TRY(ContinueDebugEventImpl(debug_handle, flags, std::addressof(thread_id), 1)); + + return ResultSuccess(); + } + + Result GetDebugThreadContext(KUserPointer out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) { + /* Validate the context flags. */ + R_UNLESS((context_flags | ams::svc::ThreadContextFlag_All) == ams::svc::ThreadContextFlag_All, svc::ResultInvalidEnumValue()); + + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the thread context. */ + ams::svc::ThreadContext context = {}; + R_TRY(debug->GetThreadContext(std::addressof(context), thread_id, context_flags)); + + /* Copy the context to userspace. */ + R_TRY(out_context.CopyFrom(std::addressof(context))); + + return ResultSuccess(); + } + + Result SetDebugThreadContext(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer user_context, uint32_t context_flags) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + + /* Validate the context flags. */ + R_UNLESS((context_flags | ams::svc::ThreadContextFlag_All) == ams::svc::ThreadContextFlag_All, svc::ResultInvalidEnumValue()); + + /* Copy the thread context from userspace. */ + ams::svc::ThreadContext context; + R_TRY(user_context.CopyTo(std::addressof(context))); + + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Set the thread context. */ + R_TRY(debug->SetThreadContext(context, thread_id, context_flags)); + + return ResultSuccess(); + } + + Result QueryDebugProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, uintptr_t address) { + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Query the mapping's info. */ + R_TRY(debug->QueryMemoryInfo(out_memory_info, out_page_info, address)); + + return ResultSuccess(); + } + + template + Result QueryDebugProcessMemory(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, uint64_t address) { + /* Get an ams::svc::MemoryInfo for the region. */ + ams::svc::MemoryInfo info = {}; + R_TRY(QueryDebugProcessMemory(std::addressof(info), out_page_info, debug_handle, address)); + + /* Copy the info to userspace. */ + if constexpr (std::same_as) { + R_TRY(out_memory_info.CopyFrom(std::addressof(info))); + } else { + /* Convert the info. */ + T converted_info = {}; + static_assert(std::same_as); + static_assert(std::same_as); + + converted_info.addr = info.addr; + converted_info.size = info.size; + converted_info.state = info.state; + converted_info.attr = info.attr; + converted_info.perm = info.perm; + converted_info.ipc_refcount = info.ipc_refcount; + converted_info.device_refcount = info.device_refcount; + + /* Copy it. */ + R_TRY(out_memory_info.CopyFrom(std::addressof(converted_info))); + } + + return ResultSuccess(); + } + + Result ReadDebugProcessMemory(uintptr_t buffer, ams::svc::Handle debug_handle, uintptr_t address, size_t size) { + /* Validate address / size. 
*/ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((buffer < buffer + size), svc::ResultInvalidCurrentMemory()); + + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Read the memory. */ + R_TRY(debug->ReadMemory(buffer, address, size)); + + return ResultSuccess(); + } + + Result WriteDebugProcessMemory(ams::svc::Handle debug_handle, uintptr_t buffer, uintptr_t address, size_t size) { + /* Validate address / size. */ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((buffer < buffer + size), svc::ResultInvalidCurrentMemory()); + + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Write the memory. */ + R_TRY(debug->WriteMemory(buffer, address, size)); + + return ResultSuccess(); + } + + Result SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented()); + + /* Set the breakpoint. */ + R_TRY(KDebug::SetHardwareBreakPoint(name, flags, value)); + + return ResultSuccess(); + } + + Result GetDebugThreadParam(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) { + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the thread from its id. */ + KScopedAutoObject thread = KThread::GetThreadFromId(thread_id); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidThreadId()); + + /* Get the process from the debug object. */ + KScopedAutoObject process = debug->GetProcess(); + R_UNLESS(process.IsNotNull(), svc::ResultProcessTerminated()); + + /* Verify that the process is the thread's parent. */ + R_UNLESS(process.GetPointerUnsafe() == thread->GetOwnerProcess(), svc::ResultInvalidThreadId()); + + /* Get the parameter. */ + switch (param) { + case ams::svc::DebugThreadParam_Priority: + { + /* Get the priority. */ + *out_32 = thread->GetPriority(); + } + break; + case ams::svc::DebugThreadParam_State: + { + /* Get the thread state and suspend status. */ + KThread::ThreadState state; + bool suspended_user; + bool suspended_debug; + { + KScopedSchedulerLock sl; + + state = thread->GetState(); + suspended_user = thread->IsSuspendRequested(KThread::SuspendType_Thread); + suspended_debug = thread->IsSuspendRequested(KThread::SuspendType_Debug); + } + + /* Set the suspend flags. */ + *out_32 = 0; + if (suspended_user) { + *out_32 |= ams::svc::ThreadSuspend_User; + } + if (suspended_debug) { + *out_32 |= ams::svc::ThreadSuspend_Debug; + } + + /* Set the state. 
*/ + switch (state) { + case KThread::ThreadState_Initialized: + { + *out_64 = ams::svc::ThreadState_Initializing; + } + break; + case KThread::ThreadState_Waiting: + { + *out_64 = ams::svc::ThreadState_Waiting; + } + break; + case KThread::ThreadState_Runnable: + { + *out_64 = ams::svc::ThreadState_Running; + } + break; + case KThread::ThreadState_Terminated: + { + *out_64 = ams::svc::ThreadState_Terminated; + } + break; + default: + return svc::ResultInvalidState(); + } + } + break; + case ams::svc::DebugThreadParam_IdealCore: + { + /* Get the ideal core. */ + *out_32 = thread->GetIdealCore(); + } + break; + case ams::svc::DebugThreadParam_CurrentCore: + { + /* Get the current core. */ + *out_32 = thread->GetActiveCore(); + } + break; + case ams::svc::DebugThreadParam_AffinityMask: + { + /* Get the affinity mask. */ + *out_32 = thread->GetAffinityMask().GetAffinityMask(); + } + break; + default: + return ams::svc::ResultInvalidEnumValue(); + } + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result DebugActiveProcess64(ams::svc::Handle *out_handle, uint64_t process_id) { - MESOSPHERE_PANIC("Stubbed SvcDebugActiveProcess64 was called."); + return DebugActiveProcess(out_handle, process_id); } Result BreakDebugProcess64(ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcBreakDebugProcess64 was called."); + return BreakDebugProcess(debug_handle); } Result TerminateDebugProcess64(ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcTerminateDebugProcess64 was called."); + return TerminateDebugProcess(debug_handle); } Result GetDebugEvent64(KUserPointer out_info, ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugEvent64 was called."); + return GetDebugEvent(out_info, debug_handle); } Result ContinueDebugEvent64(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer thread_ids, int32_t num_thread_ids) { - MESOSPHERE_PANIC("Stubbed SvcContinueDebugEvent64 was called."); + return ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids); + } + + Result LegacyContinueDebugEvent64(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) { + return LegacyContinueDebugEvent(debug_handle, flags, thread_id); } Result GetDebugThreadContext64(KUserPointer out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadContext64 was called."); + return GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags); } Result SetDebugThreadContext64(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer context, uint32_t context_flags) { - MESOSPHERE_PANIC("Stubbed SvcSetDebugThreadContext64 was called."); + return SetDebugThreadContext(debug_handle, thread_id, context, context_flags); } - Result QueryDebugProcessMemory64(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, ams::svc::Address address) { - MESOSPHERE_PANIC("Stubbed SvcQueryDebugProcessMemory64 was called."); + Result QueryDebugProcessMemory64(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, ams::svc::Address address) { + return QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address); } Result ReadDebugProcessMemory64(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcReadDebugProcessMemory64 was called."); + return 
ReadDebugProcessMemory(buffer, debug_handle, address, size); } Result WriteDebugProcessMemory64(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcWriteDebugProcessMemory64 was called."); + return WriteDebugProcessMemory(debug_handle, buffer, address, size); } Result SetHardwareBreakPoint64(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) { - MESOSPHERE_PANIC("Stubbed SvcSetHardwareBreakPoint64 was called."); + return SetHardwareBreakPoint(name, flags, value); } Result GetDebugThreadParam64(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadParam64 was called."); + return GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param); } /* ============================= 64From32 ABI ============================= */ Result DebugActiveProcess64From32(ams::svc::Handle *out_handle, uint64_t process_id) { - MESOSPHERE_PANIC("Stubbed SvcDebugActiveProcess64From32 was called."); + return DebugActiveProcess(out_handle, process_id); } Result BreakDebugProcess64From32(ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcBreakDebugProcess64From32 was called."); + return BreakDebugProcess(debug_handle); } Result TerminateDebugProcess64From32(ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcTerminateDebugProcess64From32 was called."); + return TerminateDebugProcess(debug_handle); } Result GetDebugEvent64From32(KUserPointer out_info, ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugEvent64From32 was called."); + return GetDebugEvent(out_info, debug_handle); } Result ContinueDebugEvent64From32(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer thread_ids, int32_t num_thread_ids) { - MESOSPHERE_PANIC("Stubbed SvcContinueDebugEvent64From32 was called."); + return ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids); + } + + Result LegacyContinueDebugEvent64From32(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) { + return LegacyContinueDebugEvent(debug_handle, flags, thread_id); } Result GetDebugThreadContext64From32(KUserPointer out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadContext64From32 was called."); + return GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags); } Result SetDebugThreadContext64From32(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer context, uint32_t context_flags) { - MESOSPHERE_PANIC("Stubbed SvcSetDebugThreadContext64From32 was called."); + return SetDebugThreadContext(debug_handle, thread_id, context, context_flags); } - Result QueryDebugProcessMemory64From32(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, ams::svc::Address address) { - MESOSPHERE_PANIC("Stubbed SvcQueryDebugProcessMemory64From32 was called."); + Result QueryDebugProcessMemory64From32(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, ams::svc::Address address) { + return QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address); } Result ReadDebugProcessMemory64From32(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcReadDebugProcessMemory64From32 was called."); + 
return ReadDebugProcessMemory(buffer, debug_handle, address, size); } Result WriteDebugProcessMemory64From32(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcWriteDebugProcessMemory64From32 was called."); + return WriteDebugProcessMemory(debug_handle, buffer, address, size); } Result SetHardwareBreakPoint64From32(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) { - MESOSPHERE_PANIC("Stubbed SvcSetHardwareBreakPoint64From32 was called."); + return SetHardwareBreakPoint(name, flags, value); } Result GetDebugThreadParam64From32(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugThreadParam64From32 was called."); + return GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_debug_string.cpp b/libraries/libmesosphere/source/svc/kern_svc_debug_string.cpp index 547ae5f14..f337c4e5d 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_debug_string.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_debug_string.cpp @@ -21,20 +21,29 @@ namespace ams::kern::svc { namespace { + Result OutputDebugString(KUserPointer debug_str, size_t len) { + /* Succeed immediately if there's nothing to output. */ + R_SUCCEED_IF(len == 0); + /* Ensure that the data being output is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(debug_str.GetUnsafePointer()), len), svc::ResultInvalidCurrentMemory()); + + /* Output the string. */ + return KDebugLog::PrintUserString(debug_str, len); + } } /* ============================= 64 ABI ============================= */ Result OutputDebugString64(KUserPointer debug_str, ams::svc::Size len) { - MESOSPHERE_PANIC("Stubbed SvcOutputDebugString64 was called."); + return OutputDebugString(debug_str, len); } /* ============================= 64From32 ABI ============================= */ Result OutputDebugString64From32(KUserPointer debug_str, ams::svc::Size len) { - MESOSPHERE_PANIC("Stubbed SvcOutputDebugString64From32 was called."); + return OutputDebugString(debug_str, len); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_device_address_space.cpp b/libraries/libmesosphere/source/svc/kern_svc_device_address_space.cpp index 60cbebde9..84f36b0bf 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_device_address_space.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_device_address_space.cpp @@ -21,68 +21,209 @@ namespace ams::kern::svc { namespace { + constexpr inline u64 DeviceAddressSpaceAlignMask = (1ul << 22) - 1; + constexpr bool IsProcessAndDeviceAligned(uint64_t process_address, uint64_t device_address) { + return (process_address & DeviceAddressSpaceAlignMask) == (device_address & DeviceAddressSpaceAlignMask); + } + + Result CreateDeviceAddressSpace(ams::svc::Handle *out, uint64_t das_address, uint64_t das_size) { + /* Validate input. */ + R_UNLESS(util::IsAligned(das_address, PageSize), svc::ResultInvalidMemoryRegion()); + R_UNLESS(util::IsAligned(das_size, PageSize), svc::ResultInvalidMemoryRegion()); + R_UNLESS(das_size > 0, svc::ResultInvalidMemoryRegion()); + R_UNLESS((das_address < das_address + das_size), svc::ResultInvalidMemoryRegion()); + + /* Create the device address space. 
*/ + KDeviceAddressSpace *das = KDeviceAddressSpace::Create(); + R_UNLESS(das != nullptr, svc::ResultOutOfResource()); + ON_SCOPE_EXIT { das->Close(); }; + + /* Initialize the device address space. */ + R_TRY(das->Initialize(das_address, das_size)); + + /* Register the device address space. */ + R_TRY(KDeviceAddressSpace::Register(das)); + + /* Add to the handle table. */ + R_TRY(GetCurrentProcess().GetHandleTable().Add(out, das)); + + return ResultSuccess(); + } + + Result AttachDeviceAddressSpace(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) { + /* Get the device address space. */ + KScopedAutoObject das = GetCurrentProcess().GetHandleTable().GetObject(das_handle); + R_UNLESS(das.IsNotNull(), svc::ResultInvalidHandle()); + + /* Attach. */ + return das->Attach(device_name); + } + + Result DetachDeviceAddressSpace(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) { + /* Get the device address space. */ + KScopedAutoObject das = GetCurrentProcess().GetHandleTable().GetObject(das_handle); + R_UNLESS(das.IsNotNull(), svc::ResultInvalidHandle()); + + /* Detach. */ + return das->Detach(device_name); + } + + constexpr bool IsValidDeviceMemoryPermission(ams::svc::MemoryPermission device_perm) { + switch (device_perm) { + case ams::svc::MemoryPermission_Read: + case ams::svc::MemoryPermission_Write: + case ams::svc::MemoryPermission_ReadWrite: + return true; + default: + return false; + } + } + + Result MapDeviceAddressSpace(size_t *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) { + /* Validate input. */ + R_UNLESS(util::IsAligned(process_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(device_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((process_address < process_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((device_address < device_address + size), svc::ResultInvalidMemoryRegion()); + R_UNLESS((process_address == static_cast(process_address)), svc::ResultInvalidCurrentMemory()); + R_UNLESS(IsValidDeviceMemoryPermission(device_perm), svc::ResultInvalidNewMemoryPermission()); + + /* Get the device address space. */ + KScopedAutoObject das = GetCurrentProcess().GetHandleTable().GetObject(das_handle); + R_UNLESS(das.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the process. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Validate that the process address is within range. */ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory()); + + /* Map. */ + return das->Map(out_mapped_size, std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm, refresh_mappings); + } + + Result MapDeviceAddressSpaceAligned(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address, ams::svc::MemoryPermission device_perm) { + /* Validate input. 
*/ + R_UNLESS(util::IsAligned(process_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(device_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(IsProcessAndDeviceAligned(process_address, device_address), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((process_address < process_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((device_address < device_address + size), svc::ResultInvalidMemoryRegion()); + R_UNLESS((process_address == static_cast(process_address)), svc::ResultInvalidCurrentMemory()); + R_UNLESS(IsValidDeviceMemoryPermission(device_perm), svc::ResultInvalidNewMemoryPermission()); + + /* Get the device address space. */ + KScopedAutoObject das = GetCurrentProcess().GetHandleTable().GetObject(das_handle); + R_UNLESS(das.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the process. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Validate that the process address is within range. */ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory()); + + /* Map. */ + return das->MapAligned(std::addressof(page_table), KProcessAddress(process_address), size, device_address, device_perm); + } + + Result UnmapDeviceAddressSpace(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, size_t size, uint64_t device_address) { + /* Validate input. */ + R_UNLESS(util::IsAligned(process_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(device_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((process_address < process_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((device_address < device_address + size), svc::ResultInvalidMemoryRegion()); + R_UNLESS((process_address == static_cast(process_address)), svc::ResultInvalidCurrentMemory()); + + /* Get the device address space. */ + KScopedAutoObject das = GetCurrentProcess().GetHandleTable().GetObject(das_handle); + R_UNLESS(das.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the process. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Validate that the process address is within range. 
*/ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(process_address, size), svc::ResultInvalidCurrentMemory()); + + return das->Unmap(std::addressof(page_table), KProcessAddress(process_address), size, device_address); + } } /* ============================= 64 ABI ============================= */ Result CreateDeviceAddressSpace64(ams::svc::Handle *out_handle, uint64_t das_address, uint64_t das_size) { - MESOSPHERE_PANIC("Stubbed SvcCreateDeviceAddressSpace64 was called."); + return CreateDeviceAddressSpace(out_handle, das_address, das_size); } Result AttachDeviceAddressSpace64(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) { - MESOSPHERE_PANIC("Stubbed SvcAttachDeviceAddressSpace64 was called."); + return AttachDeviceAddressSpace(device_name, das_handle); } Result DetachDeviceAddressSpace64(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) { - MESOSPHERE_PANIC("Stubbed SvcDetachDeviceAddressSpace64 was called."); + return DetachDeviceAddressSpace(device_name, das_handle); } Result MapDeviceAddressSpaceByForce64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceByForce64 was called."); + size_t dummy_map_size; + return MapDeviceAddressSpace(std::addressof(dummy_map_size), das_handle, process_handle, process_address, size, device_address, device_perm, false); } Result MapDeviceAddressSpaceAligned64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceAligned64 was called."); + return MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm); } Result MapDeviceAddressSpace64(ams::svc::Size *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpace64 was called."); + static_assert(sizeof(*out_mapped_size) == sizeof(size_t)); + return MapDeviceAddressSpace(reinterpret_cast(out_mapped_size), das_handle, process_handle, process_address, size, device_address, device_perm, true); } Result UnmapDeviceAddressSpace64(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) { - MESOSPHERE_PANIC("Stubbed SvcUnmapDeviceAddressSpace64 was called."); + return UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address); } /* ============================= 64From32 ABI ============================= */ Result CreateDeviceAddressSpace64From32(ams::svc::Handle *out_handle, uint64_t das_address, uint64_t das_size) { - MESOSPHERE_PANIC("Stubbed SvcCreateDeviceAddressSpace64From32 was called."); + return CreateDeviceAddressSpace(out_handle, das_address, das_size); } Result AttachDeviceAddressSpace64From32(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) { - MESOSPHERE_PANIC("Stubbed SvcAttachDeviceAddressSpace64From32 was called."); + return AttachDeviceAddressSpace(device_name, das_handle); } Result DetachDeviceAddressSpace64From32(ams::svc::DeviceName device_name, ams::svc::Handle das_handle) { - MESOSPHERE_PANIC("Stubbed SvcDetachDeviceAddressSpace64From32 
was called."); + return DetachDeviceAddressSpace(device_name, das_handle); } Result MapDeviceAddressSpaceByForce64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceByForce64From32 was called."); + size_t dummy_map_size; + return MapDeviceAddressSpace(std::addressof(dummy_map_size), das_handle, process_handle, process_address, size, device_address, device_perm, false); } Result MapDeviceAddressSpaceAligned64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpaceAligned64From32 was called."); + return MapDeviceAddressSpaceAligned(das_handle, process_handle, process_address, size, device_address, device_perm); } Result MapDeviceAddressSpace64From32(ams::svc::Size *out_mapped_size, ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address, ams::svc::MemoryPermission device_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapDeviceAddressSpace64From32 was called."); + static_assert(sizeof(*out_mapped_size) == sizeof(size_t)); + return MapDeviceAddressSpace(reinterpret_cast(out_mapped_size), das_handle, process_handle, process_address, size, device_address, device_perm, true); } Result UnmapDeviceAddressSpace64From32(ams::svc::Handle das_handle, ams::svc::Handle process_handle, uint64_t process_address, ams::svc::Size size, uint64_t device_address) { - MESOSPHERE_PANIC("Stubbed SvcUnmapDeviceAddressSpace64From32 was called."); + return UnmapDeviceAddressSpace(das_handle, process_handle, process_address, size, device_address); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_event.cpp b/libraries/libmesosphere/source/svc/kern_svc_event.cpp index 77aead4d5..4a2f68eff 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_event.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_event.cpp @@ -21,36 +21,110 @@ namespace ams::kern::svc { namespace { + Result SignalEvent(ams::svc::Handle event_handle) { + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + /* Get the writable event. */ + KScopedAutoObject writable_event = handle_table.GetObject(event_handle); + R_UNLESS(writable_event.IsNotNull(), svc::ResultInvalidHandle()); + + return writable_event->Signal(); + } + + Result ClearEvent(ams::svc::Handle event_handle) { + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Try to clear the writable event. */ + { + KScopedAutoObject writable_event = handle_table.GetObject(event_handle); + if (writable_event.IsNotNull()) { + return writable_event->Clear(); + } + } + + /* Try to clear the readable event. */ + { + KScopedAutoObject readable_event = handle_table.GetObject(event_handle); + if (readable_event.IsNotNull()) { + return readable_event->Clear(); + } + } + + return svc::ResultInvalidHandle(); + } + + Result CreateEvent(ams::svc::Handle *out_write, ams::svc::Handle *out_read) { + /* Get the current process and handle table. */ + auto &process = GetCurrentProcess(); + auto &handle_table = process.GetHandleTable(); + + /* Reserve a new event from the process resource limit. 
*/ + KScopedResourceReservation event_reservation(std::addressof(process), ams::svc::LimitableResource_EventCountMax); + R_UNLESS(event_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Create a new event. */ + KEvent *event = KEvent::Create(); + R_UNLESS(event != nullptr, svc::ResultOutOfResource()); + + /* Initialize the event. */ + event->Initialize(); + + /* Commit the event reservation. */ + event_reservation.Commit(); + + /* Ensure that we clean up the event (and that its only references are the handle table) on function end. */ + ON_SCOPE_EXIT { + event->GetWritableEvent().Close(); + event->GetReadableEvent().Close(); + }; + + /* Register the event. */ + R_TRY(KEvent::Register(event)); + + /* Add the writable event to the handle table. */ + R_TRY(handle_table.Add(out_write, std::addressof(event->GetWritableEvent()))); + + /* Ensure that we maintain a clean handle state on exit. */ + auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_write); }; + + /* Add the readable event to the handle table. */ + R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent()))); + + /* We succeeded! */ + handle_guard.Cancel(); + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result SignalEvent64(ams::svc::Handle event_handle) { - MESOSPHERE_PANIC("Stubbed SvcSignalEvent64 was called."); + return SignalEvent(event_handle); } Result ClearEvent64(ams::svc::Handle event_handle) { - MESOSPHERE_PANIC("Stubbed SvcClearEvent64 was called."); + return ClearEvent(event_handle); } Result CreateEvent64(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) { - MESOSPHERE_PANIC("Stubbed SvcCreateEvent64 was called."); + return CreateEvent(out_write_handle, out_read_handle); } /* ============================= 64From32 ABI ============================= */ Result SignalEvent64From32(ams::svc::Handle event_handle) { - MESOSPHERE_PANIC("Stubbed SvcSignalEvent64From32 was called."); + return SignalEvent(event_handle); } Result ClearEvent64From32(ams::svc::Handle event_handle) { - MESOSPHERE_PANIC("Stubbed SvcClearEvent64From32 was called."); + return ClearEvent(event_handle); } Result CreateEvent64From32(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) { - MESOSPHERE_PANIC("Stubbed SvcCreateEvent64From32 was called."); + return CreateEvent(out_write_handle, out_read_handle); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_exception.cpp b/libraries/libmesosphere/source/svc/kern_svc_exception.cpp index 4df8e6d6b..f5c859a88 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_exception.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_exception.cpp @@ -21,28 +21,68 @@ namespace ams::kern::svc { namespace { + [[maybe_unused]] void PrintBreak(ams::svc::BreakReason break_reason) { + /* Print that break was called. */ + MESOSPHERE_RELEASE_LOG("%s: svc::Break(%d) was called, pid=%ld, tid=%ld\n", GetCurrentProcess().GetName(), static_cast(break_reason), GetCurrentProcess().GetId(), GetCurrentThread().GetId()); + /* Print the current thread's registers. */ + /* TODO: KDebug::PrintRegisters(); */ + + /* Print a backtrace. */ + /* TODO: KDebug::PrintBacktrace(); */ + } + + void Break(ams::svc::BreakReason break_reason, uintptr_t address, size_t size) { + /* Determine whether the break is only a notification. */ + const bool is_notification = (break_reason & ams::svc::BreakReason_NotificationOnlyFlag) != 0; + + /* If the break isn't a notification, print it. 
*/ + if (!is_notification) { + #ifdef MESOSPHERE_BUILD_FOR_DEBUGGING + PrintBreak(break_reason); + #endif + } + + /* If the current process is attached to debugger, try to notify it. */ + if (GetCurrentProcess().IsAttachedToDebugger()) { + if (R_SUCCEEDED(KDebug::BreakIfAttached(break_reason, address, size))) { + /* If we attached, set the pc to the instruction before the current one and return. */ + KDebug::SetPreviousProgramCounter(); + return; + } + } + + /* If the break is only a notification, we're done. */ + if (is_notification) { + return; + } + + /* Print that break was called. */ + MESOSPHERE_RELEASE_LOG("Break() called. %016lx\n", GetCurrentProcess().GetProgramId()); + + /* Try to enter JIT debug state. */ + if (GetCurrentProcess().EnterJitDebug(ams::svc::DebugEvent_Exception, ams::svc::DebugException_UserBreak, KDebug::GetProgramCounter(GetCurrentThread()), break_reason, address, size)) { + /* We entered JIT debug, so set the pc to the instruction before the current one and return. */ + KDebug::SetPreviousProgramCounter(); + return; + } + + /* Exit the current process. */ + GetCurrentProcess().Exit(); + } } /* ============================= 64 ABI ============================= */ void Break64(ams::svc::BreakReason break_reason, ams::svc::Address arg, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcBreak64 was called."); - } - - void ReturnFromException64(ams::Result result) { - MESOSPHERE_PANIC("Stubbed SvcReturnFromException64 was called."); + return Break(break_reason, arg, size); } /* ============================= 64From32 ABI ============================= */ void Break64From32(ams::svc::BreakReason break_reason, ams::svc::Address arg, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcBreak64From32 was called."); - } - - void ReturnFromException64From32(ams::Result result) { - MESOSPHERE_PANIC("Stubbed SvcReturnFromException64From32 was called."); + return Break(break_reason, arg, size); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_info.cpp b/libraries/libmesosphere/source/svc/kern_svc_info.cpp index 84f330e3b..15dd55a5f 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_info.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_info.cpp @@ -21,19 +21,44 @@ namespace ams::kern::svc { namespace { - Result GetInfo(u64 *out, ams::svc::InfoType info_type, ams::svc::Handle handle, u64 info_subtype) { - MESOSPHERE_LOG("GetInfo(%p, %u, %08x, %lu) was called\n", out, static_cast(info_type), static_cast(handle), info_subtype); - ON_SCOPE_EXIT{ MESOSPHERE_LOG("GetInfo returned %016lx\n", *out); }; + Result GetInitialProcessIdRange(u64 *out, ams::svc::InitialProcessIdRangeInfo info) { + switch (info) { + case ams::svc::InitialProcessIdRangeInfo_Minimum: + MESOSPHERE_ABORT_UNLESS(GetInitialProcessIdMin() <= GetInitialProcessIdMax()); + *out = GetInitialProcessIdMin(); + break; + case ams::svc::InitialProcessIdRangeInfo_Maximum: + MESOSPHERE_ABORT_UNLESS(GetInitialProcessIdMin() <= GetInitialProcessIdMax()); + *out = GetInitialProcessIdMax(); + break; + default: + return svc::ResultInvalidCombination(); + } + return ResultSuccess(); + } + + Result GetInfo(u64 *out, ams::svc::InfoType info_type, ams::svc::Handle handle, u64 info_subtype) { switch (info_type) { + case ams::svc::InfoType_CoreMask: + case ams::svc::InfoType_PriorityMask: case ams::svc::InfoType_AliasRegionAddress: case ams::svc::InfoType_AliasRegionSize: case ams::svc::InfoType_HeapRegionAddress: case ams::svc::InfoType_HeapRegionSize: + case ams::svc::InfoType_TotalMemorySize: + case 
ams::svc::InfoType_UsedMemorySize: case ams::svc::InfoType_AslrRegionAddress: case ams::svc::InfoType_AslrRegionSize: case ams::svc::InfoType_StackRegionAddress: case ams::svc::InfoType_StackRegionSize: + case ams::svc::InfoType_SystemResourceSizeTotal: + case ams::svc::InfoType_SystemResourceSizeUsed: + case ams::svc::InfoType_ProgramId: + case ams::svc::InfoType_UserExceptionContextAddress: + case ams::svc::InfoType_TotalNonSystemMemorySize: + case ams::svc::InfoType_UsedNonSystemMemorySize: + case ams::svc::InfoType_IsApplication: { /* These info types don't support non-zero subtypes. */ R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination()); @@ -43,6 +68,12 @@ namespace ams::kern::svc { R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); switch (info_type) { + case ams::svc::InfoType_CoreMask: + *out = process->GetCoreMask(); + break; + case ams::svc::InfoType_PriorityMask: + *out = process->GetPriorityMask(); + break; case ams::svc::InfoType_AliasRegionAddress: *out = GetInteger(process->GetPageTable().GetAliasRegionStart()); break; @@ -55,6 +86,12 @@ namespace ams::kern::svc { case ams::svc::InfoType_HeapRegionSize: *out = process->GetPageTable().GetHeapRegionSize(); break; + case ams::svc::InfoType_TotalMemorySize: + *out = process->GetTotalUserPhysicalMemorySize(); + break; + case ams::svc::InfoType_UsedMemorySize: + *out = process->GetUsedUserPhysicalMemorySize(); + break; case ams::svc::InfoType_AslrRegionAddress: *out = GetInteger(process->GetPageTable().GetAliasCodeRegionStart()); break; @@ -67,37 +104,194 @@ namespace ams::kern::svc { case ams::svc::InfoType_StackRegionSize: *out = process->GetPageTable().GetStackRegionSize(); break; + case ams::svc::InfoType_SystemResourceSizeTotal: + *out = process->GetTotalSystemResourceSize(); + break; + case ams::svc::InfoType_SystemResourceSizeUsed: + *out = process->GetUsedSystemResourceSize(); + break; + case ams::svc::InfoType_ProgramId: + *out = process->GetProgramId(); + break; + case ams::svc::InfoType_UserExceptionContextAddress: + *out = GetInteger(process->GetProcessLocalRegionAddress()); + break; + case ams::svc::InfoType_TotalNonSystemMemorySize: + *out = process->GetTotalNonSystemUserPhysicalMemorySize(); + break; + case ams::svc::InfoType_UsedNonSystemMemorySize: + *out = process->GetUsedNonSystemUserPhysicalMemorySize(); + break; + case ams::svc::InfoType_IsApplication: + *out = process->IsApplication(); + break; MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); } } break; + case ams::svc::InfoType_DebuggerAttached: + { + /* Verify the input handle is invalid. */ + R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* Verify the sub-type is valid. */ + R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination()); + + /* Get whether debugger is attached. */ + *out = GetCurrentProcess().GetDebugObject() != nullptr; + } + break; + case ams::svc::InfoType_ResourceLimit: + { + /* Verify the input handle is invalid. */ + R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* Verify the sub-type is valid. */ + R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination()); + + /* Get the handle table and resource limit. */ + KHandleTable &handle_table = GetCurrentProcess().GetHandleTable(); + KResourceLimit *resource_limit = GetCurrentProcess().GetResourceLimit(); + + if (resource_limit != nullptr) { + /* Get a new handle for the resource limit. */ + ams::svc::Handle tmp; + R_TRY(handle_table.Add(std::addressof(tmp), resource_limit)); + + /* Set the output. 
*/ + *out = tmp; + } else { + /* Set the output. */ + *out = ams::svc::InvalidHandle; + } + } + break; + case ams::svc::InfoType_IdleTickCount: + { + /* Verify the input handle is invalid. */ + R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* Verify the requested core is valid. */ + const bool core_valid = (info_subtype == static_cast(-1ul)) || (info_subtype == static_cast(GetCurrentCoreId())); + R_UNLESS(core_valid, svc::ResultInvalidCombination()); + + /* Get the idle tick count. */ + *out = Kernel::GetScheduler().GetIdleThread()->GetCpuTime(); + } + break; + case ams::svc::InfoType_RandomEntropy: + { + /* Verify the input handle is invalid. */ + R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* Verify the requested entropy is valid. */ + R_UNLESS(info_subtype < 4, svc::ResultInvalidCombination()); + + /* Get the entropy. */ + *out = GetCurrentProcess().GetRandomEntropy(info_subtype); + } + break; + case ams::svc::InfoType_InitialProcessIdRange: + { + /* NOTE: This info type was added in 4.0.0, and removed in 5.0.0. */ + R_UNLESS(GetTargetFirmware() < TargetFirmware_5_0_0, svc::ResultInvalidEnumValue()); + + /* Verify the input handle is invalid. */ + R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* Get the process id range. */ + R_TRY(GetInitialProcessIdRange(out, static_cast(info_subtype))); + } + break; + case ams::svc::InfoType_ThreadTickCount: + { + /* Verify the requested core is valid. */ + const bool core_valid = (info_subtype == static_cast(-1ul)) || (info_subtype < cpu::NumCores); + R_UNLESS(core_valid, svc::ResultInvalidCombination()); + + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the tick count. */ + s64 tick_count; + if (info_subtype == static_cast(-1ul)) { + tick_count = thread->GetCpuTime(); + if (GetCurrentThreadPointer() == thread.GetPointerUnsafe()) { + const s64 cur_tick = KHardwareTimer::GetTick(); + const s64 prev_switch = Kernel::GetScheduler().GetLastContextSwitchTime(); + tick_count += (cur_tick - prev_switch); + } + } else { + tick_count = thread->GetCpuTime(static_cast(info_subtype)); + if (GetCurrentThreadPointer() == thread.GetPointerUnsafe() && static_cast(info_subtype) == GetCurrentCoreId()) { + const s64 cur_tick = KHardwareTimer::GetTick(); + const s64 prev_switch = Kernel::GetScheduler().GetLastContextSwitchTime(); + tick_count += (cur_tick - prev_switch); + } + } + + /* Set the output. */ + *out = tick_count; + } + break; default: + { + /* For debug, log the invalid info call. 
*/ + MESOSPHERE_LOG("GetInfo(%p, %u, %08x, %lu) was called\n", out, static_cast(info_type), static_cast(handle), info_subtype); + } return svc::ResultInvalidEnumValue(); } return ResultSuccess(); } - Result GetSystemInfo(u64 *out, ams::svc::SystemInfoType info_type, ams::svc::Handle handle, u64 info_subtype) { - MESOSPHERE_LOG("GetSystemInfo(%p, %u, %08x, %lu) was called\n", out, static_cast(info_type), static_cast(handle), info_subtype); - ON_SCOPE_EXIT{ MESOSPHERE_LOG("GetSystemInfo returned %016lx\n", *out); }; + constexpr bool IsValidMemoryPool(u64 pool) { + switch (static_cast(pool)) { + case KMemoryManager::Pool_Application: + case KMemoryManager::Pool_Applet: + case KMemoryManager::Pool_System: + case KMemoryManager::Pool_SystemNonSecure: + return true; + default: + return false; + } + } + Result GetSystemInfo(u64 *out, ams::svc::SystemInfoType info_type, ams::svc::Handle handle, u64 info_subtype) { switch (info_type) { + case ams::svc::SystemInfoType_TotalPhysicalMemorySize: + case ams::svc::SystemInfoType_UsedPhysicalMemorySize: + { + /* Verify the input handle is invalid. */ + R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* Verify the sub-type is valid. */ + R_UNLESS(IsValidMemoryPool(info_subtype), svc::ResultInvalidCombination()); + + /* Convert to pool. */ + const auto pool = static_cast(info_subtype); + + /* Get the memory size. */ + auto &mm = Kernel::GetMemoryManager(); + switch (info_type) { + case ams::svc::SystemInfoType_TotalPhysicalMemorySize: + *out = mm.GetSize(pool); + break; + case ams::svc::SystemInfoType_UsedPhysicalMemorySize: + *out = mm.GetSize(pool) - mm.GetFreeSize(pool); + break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + break; case ams::svc::SystemInfoType_InitialProcessIdRange: { + /* Verify the handle is invalid. */ R_UNLESS(handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); - switch (static_cast(info_subtype)) { - case ams::svc::InitialProcessIdRangeInfo_Minimum: - MESOSPHERE_ABORT_UNLESS(GetInitialProcessIdMin() <= GetInitialProcessIdMax()); - *out = GetInitialProcessIdMin(); - break; - case ams::svc::InitialProcessIdRangeInfo_Maximum: - MESOSPHERE_ABORT_UNLESS(GetInitialProcessIdMin() <= GetInitialProcessIdMax()); - *out = GetInitialProcessIdMax(); - break; - default: - return svc::ResultInvalidCombination(); - } + + /* Get the process id range. */ + R_TRY(GetInitialProcessIdRange(out, static_cast(info_subtype))); } break; default: diff --git a/libraries/libmesosphere/source/svc/kern_svc_interrupt_event.cpp b/libraries/libmesosphere/source/svc/kern_svc_interrupt_event.cpp index f724e223e..6019820d8 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_interrupt_event.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_interrupt_event.cpp @@ -21,20 +21,56 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidInterruptType(ams::svc::InterruptType type) { + switch (type) { + case ams::svc::InterruptType_Edge: + case ams::svc::InterruptType_Level: + return true; + default: + return false; + } + } + Result CreateInterruptEvent(ams::svc::Handle *out, int32_t interrupt_id, ams::svc::InterruptType type) { + /* Validate the type. */ + R_UNLESS(IsValidInterruptType(type), svc::ResultInvalidEnumValue()); + + /* Check whether the interrupt is allowed. */ + auto &process = GetCurrentProcess(); + R_UNLESS(process.IsPermittedInterrupt(interrupt_id), svc::ResultNotFound()); + + /* Get the current handle table. */ + auto &handle_table = process.GetHandleTable(); + + /* Create the interrupt event. 
*/ + KInterruptEvent *event = KInterruptEvent::Create(); + R_UNLESS(event != nullptr, svc::ResultOutOfResource()); + ON_SCOPE_EXIT { event->Close(); }; + + /* Initialize the event. */ + R_TRY(event->Initialize(interrupt_id, type)); + + /* Register the event. */ + R_TRY(KInterruptEvent::Register(event)); + + /* Add the event to the handle table. */ + R_TRY(handle_table.Add(out, event)); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result CreateInterruptEvent64(ams::svc::Handle *out_read_handle, int32_t interrupt_id, ams::svc::InterruptType interrupt_type) { - MESOSPHERE_PANIC("Stubbed SvcCreateInterruptEvent64 was called."); + return CreateInterruptEvent(out_read_handle, interrupt_id, interrupt_type); } /* ============================= 64From32 ABI ============================= */ Result CreateInterruptEvent64From32(ams::svc::Handle *out_read_handle, int32_t interrupt_id, ams::svc::InterruptType interrupt_type) { - MESOSPHERE_PANIC("Stubbed SvcCreateInterruptEvent64From32 was called."); + return CreateInterruptEvent(out_read_handle, interrupt_id, interrupt_type); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_ipc.cpp b/libraries/libmesosphere/source/svc/kern_svc_ipc.cpp index 0d51e6a55..bacdbd6db 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_ipc.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_ipc.cpp @@ -21,52 +21,287 @@ namespace ams::kern::svc { namespace { + ALWAYS_INLINE Result SendSyncRequestImpl(uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) { + /* Get the client session. */ + KScopedAutoObject session = GetCurrentProcess().GetHandleTable().GetObject(session_handle); + R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle()); + /* Get the parent, and persist a reference to it until we're done. */ + KScopedAutoObject parent = session->GetParent(); + MESOSPHERE_ASSERT(parent.IsNotNull()); + + /* Send the request. */ + return session->SendSyncRequest(message, buffer_size); + } + + ALWAYS_INLINE Result ReplyAndReceiveImpl(int32_t *out_index, uintptr_t message, size_t buffer_size, KPhysicalAddress message_paddr, KSynchronizationObject **objs, int32_t num_objects, ams::svc::Handle reply_target, int64_t timeout_ns) { + /* Reply to the target, if one is specified. */ + if (reply_target != ams::svc::InvalidHandle) { + KScopedAutoObject session = GetCurrentProcess().GetHandleTable().GetObject(reply_target); + R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle()); + + /* If we fail to reply, we want to set the output index to -1. */ + auto reply_idx_guard = SCOPE_GUARD { *out_index = -1; }; + + /* Send the reply. */ + R_TRY(session->SendReply(message, buffer_size, message_paddr)); + + /* Cancel our guard. */ + reply_idx_guard.Cancel(); + } + + /* Receive a message. */ + { + /* Convert the timeout from nanoseconds to ticks. */ + /* NOTE: Nintendo does not use this conversion logic in WaitSynchronization... */ + s64 timeout; + if (timeout_ns > 0) { + const ams::svc::Tick offset_tick(TimeSpan::FromNanoSeconds(timeout_ns)); + if (AMS_LIKELY(offset_tick > 0)) { + timeout = KHardwareTimer::GetTick() + offset_tick + 2; + if (AMS_UNLIKELY(timeout <= 0)) { + timeout = std::numeric_limits::max(); + } + } else { + timeout = std::numeric_limits::max(); + } + } else { + timeout = timeout_ns; + } + + /* Wait for a message. 
*/ + while (true) { + s32 index; + Result result = Kernel::GetSynchronization().Wait(std::addressof(index), objs, num_objects, timeout); + if (svc::ResultTimedOut::Includes(result)) { + return result; + } + + if (R_SUCCEEDED(result)) { + KServerSession *session = objs[index]->DynamicCast(); + if (session != nullptr) { + result = session->ReceiveRequest(message, buffer_size, message_paddr); + if (svc::ResultNotFound::Includes(result)) { + continue; + } + } + } + + *out_index = index; + return result; + } + } + } + + ALWAYS_INLINE Result ReplyAndReceiveImpl(int32_t *out_index, uintptr_t message, size_t buffer_size, KPhysicalAddress message_paddr, KUserPointer user_handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) { + /* Ensure number of handles is valid. */ + R_UNLESS(0 <= num_handles && num_handles <= ams::svc::ArgumentHandleCountMax, svc::ResultOutOfRange()); + + /* Get the synchronization context. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + KSynchronizationObject **objs = GetCurrentThread().GetSynchronizationObjectBuffer(); + ams::svc::Handle *handles = GetCurrentThread().GetHandleBuffer(); + + /* Copy user handles. */ + if (num_handles > 0) { + /* Ensure that we can try to get the handles. */ + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(user_handles.GetUnsafePointer()), num_handles * sizeof(ams::svc::Handle)), svc::ResultInvalidPointer()); + + /* Get the handles. */ + R_TRY(user_handles.CopyArrayTo(handles, num_handles)); + + /* Convert the handles to objects. */ + R_UNLESS(handle_table.GetMultipleObjects(objs, handles, num_handles), svc::ResultInvalidHandle()); + } + + /* Ensure handles are closed when we're done. */ + ON_SCOPE_EXIT { + for (auto i = 0; i < num_handles; ++i) { + objs[i]->Close(); + } + }; + + return ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, objs, num_handles, reply_target, timeout_ns); + } + + ALWAYS_INLINE Result SendSyncRequest(ams::svc::Handle session_handle) { + return SendSyncRequestImpl(0, 0, session_handle); + } + + ALWAYS_INLINE Result SendSyncRequestWithUserBuffer(uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) { + /* Validate that the message buffer is page aligned and does not overflow. */ + R_UNLESS(util::IsAligned(message, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(buffer_size > 0, svc::ResultInvalidSize()); + R_UNLESS(util::IsAligned(buffer_size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(message < message + buffer_size, svc::ResultInvalidCurrentMemory()); + + /* Get the process page table. */ + auto &page_table = GetCurrentProcess().GetPageTable(); + + /* Lock the message buffer. */ + R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size)); + + /* Ensure that even if we fail, we unlock the message buffer when done. */ + auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); }; + + /* Send the request. */ + MESOSPHERE_ASSERT(message != 0); + R_TRY(SendSyncRequestImpl(message, buffer_size, session_handle)); + + /* We sent the request successfully, so cancel our guard and check the unlock result. */ + unlock_guard.Cancel(); + return page_table.UnlockForIpcUserBuffer(message, buffer_size); + } + + ALWAYS_INLINE Result SendAsyncRequestWithUserBufferImpl(ams::svc::Handle *out_event_handle, uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) { + /* Get the process and handle table. 
*/ + auto &process = GetCurrentProcess(); + auto &handle_table = process.GetHandleTable(); + + /* Reserve a new event from the process resource limit. */ + KScopedResourceReservation event_reservation(std::addressof(process), ams::svc::LimitableResource_EventCountMax); + R_UNLESS(event_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Get the client session. */ + KScopedAutoObject session = GetCurrentProcess().GetHandleTable().GetObject(session_handle); + R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the parent, and persist a reference to it until we're done. */ + KScopedAutoObject parent = session->GetParent(); + MESOSPHERE_ASSERT(parent.IsNotNull()); + + /* Create a new event. */ + KEvent *event = KEvent::Create(); + R_UNLESS(event != nullptr, svc::ResultOutOfResource()); + + /* Initialize the event. */ + event->Initialize(); + + /* Commit our reservation. */ + event_reservation.Commit(); + + /* At end of scope, kill the standing references to the sub events. */ + ON_SCOPE_EXIT { + event->GetReadableEvent().Close(); + event->GetWritableEvent().Close(); + }; + + /* Register the event. */ + R_TRY(KEvent::Register(event)); + + /* Add the readable event to the handle table. */ + R_TRY(handle_table.Add(out_event_handle, std::addressof(event->GetReadableEvent()))); + + /* Ensure that if we fail to send the request, we close the readable handle. */ + auto read_guard = SCOPE_GUARD { handle_table.Remove(*out_event_handle); }; + + /* Send the async request. */ + R_TRY(session->SendAsyncRequest(std::addressof(event->GetWritableEvent()), message, buffer_size)); + + /* We succeeded. */ + read_guard.Cancel(); + return ResultSuccess(); + } + + ALWAYS_INLINE Result SendAsyncRequestWithUserBuffer(ams::svc::Handle *out_event_handle, uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) { + /* Validate that the message buffer is page aligned and does not overflow. */ + R_UNLESS(util::IsAligned(message, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(buffer_size > 0, svc::ResultInvalidSize()); + R_UNLESS(util::IsAligned(buffer_size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(message < message + buffer_size, svc::ResultInvalidCurrentMemory()); + + /* Get the process page table. */ + auto &page_table = GetCurrentProcess().GetPageTable(); + + /* Lock the message buffer. */ + R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size)); + + /* Ensure that if we fail, we unlock the message buffer. */ + auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); }; + + /* Send the request. */ + MESOSPHERE_ASSERT(message != 0); + R_TRY(SendAsyncRequestWithUserBufferImpl(out_event_handle, message, buffer_size, session_handle)); + + /* We sent the request successfully. */ + unlock_guard.Cancel(); + return ResultSuccess(); + } + + ALWAYS_INLINE Result ReplyAndReceive(int32_t *out_index, KUserPointer handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) { + return ReplyAndReceiveImpl(out_index, 0, 0, Null, handles, num_handles, reply_target, timeout_ns); + } + + ALWAYS_INLINE Result ReplyAndReceiveWithUserBuffer(int32_t *out_index, uintptr_t message, size_t buffer_size, KUserPointer handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) { + /* Validate that the message buffer is page aligned and does not overflow. 
*/ + R_UNLESS(util::IsAligned(message, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(buffer_size > 0, svc::ResultInvalidSize()); + R_UNLESS(util::IsAligned(buffer_size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(message < message + buffer_size, svc::ResultInvalidCurrentMemory()); + + /* Get the process page table. */ + auto &page_table = GetCurrentProcess().GetPageTable(); + + /* Lock the message buffer, getting its physical address. */ + KPhysicalAddress message_paddr; + R_TRY(page_table.LockForIpcUserBuffer(std::addressof(message_paddr), message, buffer_size)); + + /* Ensure that even if we fail, we unlock the message buffer when done. */ + auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); }; + + /* Send the request. */ + MESOSPHERE_ASSERT(message != 0); + R_TRY(ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, handles, num_handles, reply_target, timeout_ns)); + + /* We sent the request successfully, so cancel our guard and check the unlock result. */ + unlock_guard.Cancel(); + return page_table.UnlockForIpcUserBuffer(message, buffer_size); + } } /* ============================= 64 ABI ============================= */ Result SendSyncRequest64(ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendSyncRequest64 was called."); + return SendSyncRequest(session_handle); } Result SendSyncRequestWithUserBuffer64(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestWithUserBuffer64 was called."); + return SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle); } Result SendAsyncRequestWithUserBuffer64(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendAsyncRequestWithUserBuffer64 was called."); + return SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle); } Result ReplyAndReceive64(int32_t *out_index, KUserPointer handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcReplyAndReceive64 was called."); + return ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns); } Result ReplyAndReceiveWithUserBuffer64(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveWithUserBuffer64 was called."); + return ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); } /* ============================= 64From32 ABI ============================= */ Result SendSyncRequest64From32(ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendSyncRequest64From32 was called."); + return SendSyncRequest(session_handle); } Result SendSyncRequestWithUserBuffer64From32(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestWithUserBuffer64From32 was called."); + return SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle); } Result SendAsyncRequestWithUserBuffer64From32(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, 
ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendAsyncRequestWithUserBuffer64From32 was called."); + return SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle); } Result ReplyAndReceive64From32(int32_t *out_index, KUserPointer handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcReplyAndReceive64From32 was called."); + return ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns); } Result ReplyAndReceiveWithUserBuffer64From32(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveWithUserBuffer64From32 was called."); + return ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp b/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp index ced75f3e2..8d18f46e6 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp @@ -21,28 +21,55 @@ namespace ams::kern::svc { namespace { + void KernelDebug(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) { + #ifdef ATMOSPHERE_BUILD_FOR_DEBUGGING + { + /* TODO: Implement Kernel Debugging. */ + } + #endif + } + void ChangeKernelTraceState(ams::svc::KernelTraceState kern_trace_state) { + #ifdef ATMOSPHERE_BUILD_FOR_DEBUGGING + { + switch (kern_trace_state) { + case ams::svc::KernelTraceState_Enabled: + { + /* TODO: MESOSPHERE_KTRACE_RESUME(); */ + } + break; + case ams::svc::KernelTraceState_Disabled: + { + /* TODO: MESOSPHERE_KTRACE_PAUSE(); */ + } + break; + default: + break; + } + } + #endif + } } /* ============================= 64 ABI ============================= */ void KernelDebug64(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) { - MESOSPHERE_PANIC("Stubbed SvcKernelDebug64 was called."); + return KernelDebug(kern_debug_type, arg0, arg1, arg2); } void ChangeKernelTraceState64(ams::svc::KernelTraceState kern_trace_state) { - MESOSPHERE_PANIC("Stubbed SvcChangeKernelTraceState64 was called."); + return ChangeKernelTraceState(kern_trace_state); } /* ============================= 64From32 ABI ============================= */ void KernelDebug64From32(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) { - MESOSPHERE_PANIC("Stubbed SvcKernelDebug64From32 was called."); + return KernelDebug(kern_debug_type, arg0, arg1, arg2); } void ChangeKernelTraceState64From32(ams::svc::KernelTraceState kern_trace_state) { - MESOSPHERE_PANIC("Stubbed SvcChangeKernelTraceState64From32 was called."); + return ChangeKernelTraceState(kern_trace_state); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp b/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp index 8e80e3932..6796e674e 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp @@ -21,28 +21,48 @@ namespace ams::kern::svc { namespace { + ALWAYS_INLINE Result SendSyncRequestLight(ams::svc::Handle session_handle, u32 *args) { + /* Get the light client session from its handle. 
*/ + KScopedAutoObject session = GetCurrentProcess().GetHandleTable().GetObject(session_handle); + R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle()); + /* Send the request. */ + R_TRY(session->SendSyncRequest(args)); + + return ResultSuccess(); + } + + ALWAYS_INLINE Result ReplyAndReceiveLight(ams::svc::Handle session_handle, u32 *args) { + /* Get the light server session from its handle. */ + KScopedAutoObject session = GetCurrentProcess().GetHandleTable().GetObject(session_handle); + R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle()); + + /* Handle the request. */ + R_TRY(session->ReplyAndReceive(args)); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ - Result SendSyncRequestLight64(ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestLight64 was called."); + Result SendSyncRequestLight64(ams::svc::Handle session_handle, u32 *args) { + return SendSyncRequestLight(session_handle, args); } - Result ReplyAndReceiveLight64(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveLight64 was called."); + Result ReplyAndReceiveLight64(ams::svc::Handle session_handle, u32 *args) { + return ReplyAndReceiveLight(session_handle, args); } /* ============================= 64From32 ABI ============================= */ - Result SendSyncRequestLight64From32(ams::svc::Handle session_handle) { - MESOSPHERE_PANIC("Stubbed SvcSendSyncRequestLight64From32 was called."); + Result SendSyncRequestLight64From32(ams::svc::Handle session_handle, u32 *args) { + return SendSyncRequestLight(session_handle, args); } - Result ReplyAndReceiveLight64From32(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcReplyAndReceiveLight64From32 was called."); + Result ReplyAndReceiveLight64From32(ams::svc::Handle session_handle, u32 *args) { + return ReplyAndReceiveLight(session_handle, args); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_lock.cpp b/libraries/libmesosphere/source/svc/kern_svc_lock.cpp index 1c264dcc4..32c6a67b1 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_lock.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_lock.cpp @@ -21,28 +21,46 @@ namespace ams::kern::svc { namespace { + constexpr bool IsKernelAddress(uintptr_t address) { + return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd; + } + Result ArbitrateLock(ams::svc::Handle thread_handle, uintptr_t address, uint32_t tag) { + /* Validate the input address. */ + R_UNLESS(!IsKernelAddress(address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(util::IsAligned(address, sizeof(u32)), svc::ResultInvalidAddress()); + + return GetCurrentProcess().WaitForAddress(thread_handle, address, tag); + } + + Result ArbitrateUnlock(uintptr_t address) { + /* Validate the input address. 
*/ + R_UNLESS(!IsKernelAddress(address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(util::IsAligned(address, sizeof(u32)), svc::ResultInvalidAddress()); + + return GetCurrentProcess().SignalToAddress(address); + } } /* ============================= 64 ABI ============================= */ Result ArbitrateLock64(ams::svc::Handle thread_handle, ams::svc::Address address, uint32_t tag) { - MESOSPHERE_PANIC("Stubbed SvcArbitrateLock64 was called."); + return ArbitrateLock(thread_handle, address, tag); } Result ArbitrateUnlock64(ams::svc::Address address) { - MESOSPHERE_PANIC("Stubbed SvcArbitrateUnlock64 was called."); + return ArbitrateUnlock(address); } /* ============================= 64From32 ABI ============================= */ Result ArbitrateLock64From32(ams::svc::Handle thread_handle, ams::svc::Address address, uint32_t tag) { - MESOSPHERE_PANIC("Stubbed SvcArbitrateLock64From32 was called."); + return ArbitrateLock(thread_handle, address, tag); } Result ArbitrateUnlock64From32(ams::svc::Address address) { - MESOSPHERE_PANIC("Stubbed SvcArbitrateUnlock64From32 was called."); + return ArbitrateUnlock(address); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_memory.cpp index a3770ddf5..fafee3650 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_memory.cpp @@ -21,44 +21,139 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidSetMemoryPermission(ams::svc::MemoryPermission perm) { + switch (perm) { + case ams::svc::MemoryPermission_None: + case ams::svc::MemoryPermission_Read: + case ams::svc::MemoryPermission_ReadWrite: + return true; + default: + return false; + } + } + Result SetMemoryPermission(uintptr_t address, size_t size, ams::svc::MemoryPermission perm) { + /* Validate address / size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Validate the permission. */ + R_UNLESS(IsValidSetMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission()); + + /* Validate that the region is in range for the current process. */ + auto &page_table = GetCurrentProcess().GetPageTable(); + R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Set the memory permission. */ + return page_table.SetMemoryPermission(address, size, perm); + } + + Result SetMemoryAttribute(uintptr_t address, size_t size, uint32_t mask, uint32_t attr) { + /* Validate address / size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Validate the attribute and mask. */ + constexpr u32 SupportedMask = ams::svc::MemoryAttribute_Uncached; + R_UNLESS((mask | attr) == mask, svc::ResultInvalidCombination()); + R_UNLESS((mask | attr | SupportedMask) == SupportedMask, svc::ResultInvalidCombination()); + + /* Validate that the region is in range for the current process. */ + auto &page_table = GetCurrentProcess().GetPageTable(); + R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Set the memory attribute. 
*/ + return page_table.SetMemoryAttribute(address, size, mask, attr); + } + + Result MapMemory(uintptr_t dst_address, uintptr_t src_address, size_t size) { + /* Validate that addresses are page aligned. */ + R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress()); + + /* Validate that size is positive and page aligned. */ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + + /* Ensure that neither mapping overflows. */ + R_UNLESS(src_address < src_address + size, svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_address < dst_address + size, svc::ResultInvalidCurrentMemory()); + + /* Get the page table we're operating on. */ + auto &page_table = GetCurrentProcess().GetPageTable(); + + /* Ensure that the memory we're mapping is in range. */ + R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_Stack), svc::ResultInvalidMemoryRegion()); + + /* Map the memory. */ + return page_table.MapMemory(dst_address, src_address, size); + } + + Result UnmapMemory(uintptr_t dst_address, uintptr_t src_address, size_t size) { + /* Validate that addresses are page aligned. */ + R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress()); + + /* Validate that size is positive and page aligned. */ + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + + /* Ensure that neither mapping overflows. */ + R_UNLESS(src_address < src_address + size, svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_address < dst_address + size, svc::ResultInvalidCurrentMemory()); + + /* Get the page table we're operating on. */ + auto &page_table = GetCurrentProcess().GetPageTable(); + + /* Ensure that the memory we're unmapping is in range. */ + R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_Stack), svc::ResultInvalidMemoryRegion()); + + /* Unmap the memory. 
*/ + return page_table.UnmapMemory(dst_address, src_address, size); + } } /* ============================= 64 ABI ============================= */ Result SetMemoryPermission64(ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) { - MESOSPHERE_PANIC("Stubbed SvcSetMemoryPermission64 was called."); + return SetMemoryPermission(address, size, perm); } Result SetMemoryAttribute64(ams::svc::Address address, ams::svc::Size size, uint32_t mask, uint32_t attr) { - MESOSPHERE_PANIC("Stubbed SvcSetMemoryAttribute64 was called."); + return SetMemoryAttribute(address, size, mask, attr); } Result MapMemory64(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapMemory64 was called."); + return MapMemory(dst_address, src_address, size); } Result UnmapMemory64(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapMemory64 was called."); + return UnmapMemory(dst_address, src_address, size); } /* ============================= 64From32 ABI ============================= */ Result SetMemoryPermission64From32(ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) { - MESOSPHERE_PANIC("Stubbed SvcSetMemoryPermission64From32 was called."); + return SetMemoryPermission(address, size, perm); } Result SetMemoryAttribute64From32(ams::svc::Address address, ams::svc::Size size, uint32_t mask, uint32_t attr) { - MESOSPHERE_PANIC("Stubbed SvcSetMemoryAttribute64From32 was called."); + return SetMemoryAttribute(address, size, mask, attr); } Result MapMemory64From32(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapMemory64From32 was called."); + return MapMemory(dst_address, src_address, size); } Result UnmapMemory64From32(ams::svc::Address dst_address, ams::svc::Address src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapMemory64From32 was called."); + return UnmapMemory(dst_address, src_address, size); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp index c7cb9a7c6..1f97e4aca 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_physical_memory.cpp @@ -21,6 +21,22 @@ namespace ams::kern::svc { namespace { + Result SetHeapSize(uintptr_t *out_address, size_t size) { + MESOSPHERE_LOG("%s: SetHeapSize(%012zx)\n", GetCurrentProcess().GetName(), size); + + /* Validate size. */ + R_UNLESS(util::IsAligned(size, ams::svc::HeapSizeAlignment), svc::ResultInvalidSize()); + R_UNLESS(size < ams::kern::MainMemorySize, svc::ResultInvalidSize()); + + /* Set the heap size. */ + KProcessAddress address; + R_TRY(GetCurrentProcess().GetPageTable().SetHeapSize(std::addressof(address), size)); + + /* Set the output. */ + *out_address = GetInteger(address); + return ResultSuccess(); + } + Result SetUnsafeLimit(size_t limit) { /* Ensure the size is aligned. */ R_UNLESS(util::IsAligned(limit, PageSize), svc::ResultInvalidSize()); @@ -32,28 +48,110 @@ namespace ams::kern::svc { return Kernel::GetUnsafeMemory().SetLimitSize(limit); } + Result MapPhysicalMemory(uintptr_t address, size_t size) { + /* Validate address / size. 
*/ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion()); + + /* Verify that the process has system resource. */ + auto &process = GetCurrentProcess(); + R_UNLESS(process.GetTotalSystemResourceSize() > 0, svc::ResultInvalidState()); + + /* Verify that the region is in range. */ + auto &page_table = process.GetPageTable(); + R_UNLESS(page_table.IsInAliasRegion(address, size), svc::ResultInvalidMemoryRegion()); + + /* Map the memory. */ + R_TRY(page_table.MapPhysicalMemory(address, size)); + + return ResultSuccess(); + } + + Result UnmapPhysicalMemory(uintptr_t address, size_t size) { + /* Validate address / size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidMemoryRegion()); + + /* Verify that the process has system resource. */ + auto &process = GetCurrentProcess(); + R_UNLESS(process.GetTotalSystemResourceSize() > 0, svc::ResultInvalidState()); + + /* Verify that the region is in range. */ + auto &page_table = process.GetPageTable(); + R_UNLESS(page_table.IsInAliasRegion(address, size), svc::ResultInvalidMemoryRegion()); + + /* Unmap the memory. */ + R_TRY(page_table.UnmapPhysicalMemory(address, size)); + + return ResultSuccess(); + } + + Result MapPhysicalMemoryUnsafe(uintptr_t address, size_t size) { + /* Validate address / size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Verify that the region is in range. */ + auto &process = GetCurrentProcess(); + auto &page_table = process.GetPageTable(); + R_UNLESS(page_table.IsInUnsafeAliasRegion(address, size), svc::ResultInvalidMemoryRegion()); + + /* Verify that the process isn't already using the unsafe pool. */ + R_UNLESS(process.GetMemoryPool() != KMemoryManager::Pool_Unsafe, svc::ResultInvalidMemoryPool()); + + /* Map the memory. */ + R_TRY(page_table.MapPhysicalMemoryUnsafe(address, size)); + + return ResultSuccess(); + } + + Result UnmapPhysicalMemoryUnsafe(uintptr_t address, size_t size) { + /* Validate address / size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Verify that the region is in range. */ + auto &process = GetCurrentProcess(); + auto &page_table = process.GetPageTable(); + R_UNLESS(page_table.IsInUnsafeAliasRegion(address, size), svc::ResultInvalidMemoryRegion()); + + /* Unmap the memory. 
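Unlike the non-unsafe variant above, no system resource check is required here; only containment in the unsafe alias region matters.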
*/ + R_TRY(page_table.UnmapPhysicalMemoryUnsafe(address, size)); + + return ResultSuccess(); + } + } /* ============================= 64 ABI ============================= */ Result SetHeapSize64(ams::svc::Address *out_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcSetHeapSize64 was called."); + static_assert(sizeof(*out_address) == sizeof(uintptr_t)); + return SetHeapSize(reinterpret_cast(out_address), size); } Result MapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemory64 was called."); + return MapPhysicalMemory(address, size); } Result UnmapPhysicalMemory64(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemory64 was called."); + return UnmapPhysicalMemory(address, size); } Result MapPhysicalMemoryUnsafe64(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemoryUnsafe64 was called."); + return MapPhysicalMemoryUnsafe(address, size); } Result UnmapPhysicalMemoryUnsafe64(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemoryUnsafe64 was called."); + return UnmapPhysicalMemoryUnsafe(address, size); } Result SetUnsafeLimit64(ams::svc::Size limit) { @@ -63,23 +161,24 @@ namespace ams::kern::svc { /* ============================= 64From32 ABI ============================= */ Result SetHeapSize64From32(ams::svc::Address *out_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcSetHeapSize64From32 was called."); + static_assert(sizeof(*out_address) == sizeof(uintptr_t)); + return SetHeapSize(reinterpret_cast(out_address), size); } Result MapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemory64From32 was called."); + return MapPhysicalMemory(address, size); } Result UnmapPhysicalMemory64From32(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemory64From32 was called."); + return UnmapPhysicalMemory(address, size); } Result MapPhysicalMemoryUnsafe64From32(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapPhysicalMemoryUnsafe64From32 was called."); + return MapPhysicalMemoryUnsafe(address, size); } Result UnmapPhysicalMemoryUnsafe64From32(ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapPhysicalMemoryUnsafe64From32 was called."); + return UnmapPhysicalMemoryUnsafe(address, size); } Result SetUnsafeLimit64From32(ams::svc::Size limit) { diff --git a/libraries/libmesosphere/source/svc/kern_svc_port.cpp b/libraries/libmesosphere/source/svc/kern_svc_port.cpp index 2aa297d0e..3b802f27e 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_port.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_port.cpp @@ -31,7 +31,6 @@ namespace ams::kern::svc { R_UNLESS(name[sizeof(name) - 1] == '\x00', svc::ResultOutOfRange()); if (max_sessions > 0) { - MESOSPHERE_LOG("Creating Named Port %s (max sessions = %d)\n", name, max_sessions); /* Get the current handle table. */ auto &handle_table = GetCurrentProcess().GetHandleTable(); @@ -47,7 +46,7 @@ namespace ams::kern::svc { port->Initialize(max_sessions, false, 0); /* Register the port. */ - KPort::Register(port); + R_TRY(KPort::Register(port)); /* Register the handle in the table. 
*/ handle_table.Register(*out_server_handle, std::addressof(port->GetServerPort())); @@ -62,8 +61,6 @@ namespace ams::kern::svc { port->GetClientPort().Close(); register_guard.Cancel(); } else /* if (max_sessions == 0) */ { - MESOSPHERE_LOG("Deleting Named Port %s\n", name); - /* Ensure that this else case is correct. */ MESOSPHERE_AUDIT(max_sessions == 0); @@ -77,16 +74,119 @@ namespace ams::kern::svc { return ResultSuccess(); } + Result CreatePort(ams::svc::Handle *out_server, ams::svc::Handle *out_client, int32_t max_sessions, bool is_light, uintptr_t name) { + /* Ensure max sessions is valid. */ + R_UNLESS(max_sessions > 0, svc::ResultOutOfRange()); + + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Create a new port. */ + KPort *port = KPort::Create(); + R_UNLESS(port != nullptr, svc::ResultOutOfResource()); + + /* Initialize the port. */ + port->Initialize(max_sessions, is_light, name); + + /* Ensure that we clean up the port (and its only references are handle table) on function end. */ + ON_SCOPE_EXIT { + port->GetServerPort().Close(); + port->GetClientPort().Close(); + }; + + /* Register the port. */ + R_TRY(KPort::Register(port)); + + /* Add the client to the handle table. */ + R_TRY(handle_table.Add(out_client, std::addressof(port->GetClientPort()))); + + /* Ensure that we maintaing a clean handle state on exit. */ + auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_client); }; + + /* Add the server to the handle table. */ + R_TRY(handle_table.Add(out_server, std::addressof(port->GetServerPort()))); + + /* We succeeded! */ + handle_guard.Cancel(); + return ResultSuccess(); + } + + Result ConnectToNamedPort(ams::svc::Handle *out, KUserPointer user_name) { + /* Copy the provided name from user memory to kernel memory. */ + char name[KObjectName::NameLengthMax] = {}; + R_TRY(user_name.CopyStringTo(name, sizeof(name))); + + /* Validate that name is valid. */ + R_UNLESS(name[sizeof(name) - 1] == '\x00', svc::ResultOutOfRange()); + + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Find the client port. */ + auto port = KObjectName::Find(name); + R_UNLESS(port.IsNotNull(), svc::ResultNotFound()); + + /* Reserve a handle for the port. */ + /* NOTE: Nintendo really does write directly to the output handle here. */ + R_TRY(handle_table.Reserve(out)); + auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); }; + + /* Create a session. */ + KClientSession *session; + R_TRY(port->CreateSession(std::addressof(session))); + + /* Register the session in the table, close the extra reference. */ + handle_table.Register(*out, session); + session->Close(); + + /* We succeeded. */ + handle_guard.Cancel(); + return ResultSuccess(); + } + + Result ConnectToPort(ams::svc::Handle *out, ams::svc::Handle port) { + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Get the client port. */ + KScopedAutoObject client_port = handle_table.GetObject(port); + R_UNLESS(client_port.IsNotNull(), svc::ResultInvalidHandle()); + + /* Reserve a handle for the port. */ + /* NOTE: Nintendo really does write directly to the output handle here. */ + R_TRY(handle_table.Reserve(out)); + auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); }; + + /* Create and register session. 
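Light ports hand out a KLightClientSession for the reduced light-IPC path, while regular ports hand out a full KClientSession; in both cases the handle table keeps the only lasting reference, so the local one is closed immediately after registration.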
*/ + if (client_port->IsLight()) { + KLightClientSession *session; + R_TRY(client_port->CreateLightSession(std::addressof(session))); + + handle_table.Register(*out, session); + session->Close(); + } else { + KClientSession *session; + R_TRY(client_port->CreateSession(std::addressof(session))); + + handle_table.Register(*out, session); + session->Close(); + } + + /* We succeeded. */ + handle_guard.Cancel(); + return ResultSuccess(); + } + } /* ============================= 64 ABI ============================= */ Result ConnectToNamedPort64(ams::svc::Handle *out_handle, KUserPointer name) { - MESOSPHERE_PANIC("Stubbed SvcConnectToNamedPort64 was called."); + return ConnectToNamedPort(out_handle, name); } Result CreatePort64(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) { - MESOSPHERE_PANIC("Stubbed SvcCreatePort64 was called."); + return CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name); } Result ManageNamedPort64(ams::svc::Handle *out_server_handle, KUserPointer name, int32_t max_sessions) { @@ -94,17 +194,17 @@ namespace ams::kern::svc { } Result ConnectToPort64(ams::svc::Handle *out_handle, ams::svc::Handle port) { - MESOSPHERE_PANIC("Stubbed SvcConnectToPort64 was called."); + return ConnectToPort(out_handle, port); } /* ============================= 64From32 ABI ============================= */ Result ConnectToNamedPort64From32(ams::svc::Handle *out_handle, KUserPointer name) { - MESOSPHERE_PANIC("Stubbed SvcConnectToNamedPort64From32 was called."); + return ConnectToNamedPort(out_handle, name); } Result CreatePort64From32(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) { - MESOSPHERE_PANIC("Stubbed SvcCreatePort64From32 was called."); + return CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name); } Result ManageNamedPort64From32(ams::svc::Handle *out_server_handle, KUserPointer name, int32_t max_sessions) { @@ -112,7 +212,7 @@ namespace ams::kern::svc { } Result ConnectToPort64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) { - MESOSPHERE_PANIC("Stubbed SvcConnectToPort64From32 was called."); + return ConnectToPort(out_handle, port); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp b/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp index 41b0b8e87..34cbd32cb 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp @@ -21,20 +21,22 @@ namespace ams::kern::svc { namespace { - + void SleepSystem() { + return KSystemControl::SleepSystem(); + } } /* ============================= 64 ABI ============================= */ void SleepSystem64() { - MESOSPHERE_PANIC("Stubbed SvcSleepSystem64 was called."); + return SleepSystem(); } /* ============================= 64From32 ABI ============================= */ void SleepSystem64From32() { - MESOSPHERE_PANIC("Stubbed SvcSleepSystem64From32 was called."); + return SleepSystem(); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_process.cpp b/libraries/libmesosphere/source/svc/kern_svc_process.cpp index 4bf47cdda..5369409ee 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_process.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_process.cpp @@ -21,6 +21,15 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidCoreId(int32_t core_id) { + 
return (0 <= core_id && core_id < static_cast(cpu::NumCores)); + } + + void ExitProcess() { + GetCurrentProcess().Exit(); + MESOSPHERE_PANIC("Process survived call to exit"); + } + Result GetProcessId(u64 *out_process_id, ams::svc::Handle handle) { /* Get the object from the handle table. */ KScopedAutoObject obj = GetCurrentProcess().GetHandleTable().GetObject(handle); @@ -28,15 +37,19 @@ namespace ams::kern::svc { /* Get the process from the object. */ KProcess *process = nullptr; - if (obj->IsDerivedFrom(KProcess::GetStaticTypeObj())) { + if (KProcess *p = obj->DynamicCast(); p != nullptr) { /* The object is a process, so we can use it directly. */ - process = reinterpret_cast(obj.GetPointerUnsafe()); - } else if (obj->IsDerivedFrom(KThread::GetStaticTypeObj())) { + process = p; + } else if (KThread *t = obj->DynamicCast(); t != nullptr) { /* The object is a thread, so we want to use its parent. */ process = reinterpret_cast(obj.GetPointerUnsafe())->GetOwnerProcess(); - } else if (obj->IsDerivedFrom(KDebug::GetStaticTypeObj())) { + } else if (KDebug *d = obj->DynamicCast(); d != nullptr) { /* The object is a debug, so we want to use the process it's attached to. */ - MESOSPHERE_UNIMPLEMENTED(); + obj = d->GetProcess(); + + if (obj.IsNotNull()) { + process = static_cast(obj.GetPointerUnsafe()); + } } /* Make sure the target process exists. */ @@ -47,14 +60,300 @@ namespace ams::kern::svc { return ResultSuccess(); } + Result GetProcessList(int32_t *out_num_processes, KUserPointer out_process_ids, int32_t max_out_count) { + /* Validate that the out count is valid. */ + R_UNLESS((0 <= max_out_count && max_out_count <= static_cast(std::numeric_limits::max() / sizeof(u64))), svc::ResultOutOfRange()); + /* Validate that the pointer is in range. */ + if (max_out_count > 0) { + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(out_process_ids.GetUnsafePointer()), max_out_count * sizeof(u64)), svc::ResultInvalidCurrentMemory()); + } + + /* Get the process list. */ + return KProcess::GetProcessList(out_num_processes, out_process_ids, max_out_count); + } + + Result CreateProcess(ams::svc::Handle *out, const ams::svc::CreateProcessParameter ¶ms, KUserPointer user_caps, int32_t num_caps) { + /* Validate the capabilities pointer. */ + R_UNLESS(num_caps >= 0, svc::ResultInvalidPointer()); + if (num_caps > 0) { + /* Check for overflow. */ + R_UNLESS(((num_caps * sizeof(u32)) / sizeof(u32)) == static_cast(num_caps), svc::ResultInvalidPointer()); + + /* Validate that the pointer is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(user_caps.GetUnsafePointer()), num_caps * sizeof(u32)), svc::ResultInvalidPointer()); + } + + /* Validate that the parameter flags are valid. */ + R_UNLESS((params.flags & ~ams::svc::CreateProcessFlag_All) == 0, svc::ResultInvalidEnumValue()); + + /* Validate that 64-bit process is okay. */ + const bool is_64_bit = (params.flags & ams::svc::CreateProcessFlag_Is64Bit) != 0; + if constexpr (sizeof(void *) < sizeof(u64)) { + R_UNLESS(!is_64_bit, svc::ResultInvalidCombination()); + } + + /* Decide on an address space map region. 
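The 32-bit layouts use the 32-bit small map region, the deprecated 64-bit flag selects the 36-bit small map region, and the modern 64-bit flag uses the dedicated 39-bit map region while taking its size budget from the heap region, as the switch below shows.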
*/ + uintptr_t map_start, map_end; + size_t map_size; + switch (params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask) { + case ams::svc::CreateProcessFlag_AddressSpace32Bit: + case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias: + { + map_start = KAddressSpaceInfo::GetAddressSpaceStart(32, KAddressSpaceInfo::Type_MapSmall); + map_size = KAddressSpaceInfo::GetAddressSpaceSize(32, KAddressSpaceInfo::Type_MapSmall); + map_end = map_start + map_size; + } + break; + case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated: + { + /* 64-bit address space requires 64-bit process. */ + R_UNLESS(is_64_bit, svc::ResultInvalidCombination()); + + map_start = KAddressSpaceInfo::GetAddressSpaceStart(36, KAddressSpaceInfo::Type_MapSmall); + map_size = KAddressSpaceInfo::GetAddressSpaceSize(36, KAddressSpaceInfo::Type_MapSmall); + map_end = map_start + map_size; + } + break; + case ams::svc::CreateProcessFlag_AddressSpace64Bit: + { + /* 64-bit address space requires 64-bit process. */ + R_UNLESS(is_64_bit, svc::ResultInvalidCombination()); + + map_start = KAddressSpaceInfo::GetAddressSpaceStart(39, KAddressSpaceInfo::Type_Map39Bit); + map_end = map_start + KAddressSpaceInfo::GetAddressSpaceSize(39, KAddressSpaceInfo::Type_Map39Bit); + + map_size = KAddressSpaceInfo::GetAddressSpaceSize(39, KAddressSpaceInfo::Type_Heap); + } + break; + default: + return svc::ResultInvalidEnumValue(); + } + + /* Validate the pool partition. */ + /* TODO: 4.0.0 UseSecureMemory flag, pre-4.0.0 behavior. */ + switch (params.flags & ams::svc::CreateProcessFlag_PoolPartitionMask) { + case ams::svc::CreateProcessFlag_PoolPartitionApplication: + case ams::svc::CreateProcessFlag_PoolPartitionApplet: + case ams::svc::CreateProcessFlag_PoolPartitionSystem: + case ams::svc::CreateProcessFlag_PoolPartitionSystemNonSecure: + break; + default: + return svc::ResultInvalidEnumValue(); + } + + /* Check that the code address is aligned. */ + R_UNLESS(util::IsAligned(params.code_address, KProcess::AslrAlignment), svc::ResultInvalidAddress()); + + /* Check that the number of code pages is >= 0. */ + R_UNLESS(params.code_num_pages >= 0, svc::ResultInvalidSize()); + + /* Check that the number of extra resource pages is >= 0. */ + R_UNLESS(params.system_resource_num_pages >= 0, svc::ResultInvalidSize()); + + /* Convert to sizes. */ + const size_t code_num_pages = params.code_num_pages; + const size_t system_resource_num_pages = params.system_resource_num_pages; + const size_t total_pages = code_num_pages + system_resource_num_pages; + const size_t code_size = code_num_pages * PageSize; + const size_t system_resource_size = system_resource_num_pages * PageSize; + const size_t total_size = code_size + system_resource_size; + + /* Check for overflow. */ + R_UNLESS((code_size / PageSize) == code_num_pages, svc::ResultInvalidSize()); + R_UNLESS((system_resource_size / PageSize) == system_resource_num_pages, svc::ResultInvalidSize()); + R_UNLESS((code_num_pages + system_resource_num_pages) >= code_num_pages, svc::ResultOutOfMemory()); + R_UNLESS((total_size / PageSize) == total_pages, svc::ResultInvalidSize()); + + /* Check that the number of pages is valid. */ + R_UNLESS(code_num_pages < (map_size / PageSize), svc::ResultInvalidMemoryRegion()); + + /* Validate that the code falls within the map region.
*/ + R_UNLESS(map_start <= params.code_address, svc::ResultInvalidMemoryRegion()); + R_UNLESS(params.code_address < params.code_address + code_size, svc::ResultInvalidMemoryRegion()); + R_UNLESS(params.code_address + code_size - 1 <= map_end - 1, svc::ResultInvalidMemoryRegion()); + + /* Check that the number of pages is valid for the kernel address space. */ + R_UNLESS(code_num_pages < (kern::MainMemorySize / PageSize), svc::ResultOutOfMemory()); + R_UNLESS(system_resource_num_pages < (kern::MainMemorySize / PageSize), svc::ResultOutOfMemory()); + R_UNLESS(total_pages < (kern::MainMemorySize / PageSize), svc::ResultOutOfMemory()); + + /* Check that optimized memory allocation is used only for applications. */ + const bool optimize_allocs = (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0; + const bool is_application = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0; + R_UNLESS(!optimize_allocs || is_application, svc::ResultBusy()); + + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Create the new process. */ + KProcess *process = KProcess::Create(); + R_UNLESS(process != nullptr, svc::ResultOutOfResource()); + + /* Ensure that the only reference to the process is in the handle table when we're done. */ + ON_SCOPE_EXIT { process->Close(); }; + + /* Get the resource limit from the handle. */ + KScopedAutoObject resource_limit = handle_table.GetObject(params.reslimit); + R_UNLESS(resource_limit.IsNotNull() || params.reslimit == ams::svc::InvalidHandle, svc::ResultInvalidHandle()); + + /* Decide on a resource limit for the process. */ + KResourceLimit *process_resource_limit = resource_limit.IsNotNull() ? resource_limit.GetPointerUnsafe() : std::addressof(Kernel::GetSystemResourceLimit()); + + /* Get the pool for the process. */ + /* TODO: 4.0.0 UseSecureMemory flag, pre-4.0.0 behavior. */ + KMemoryManager::Pool pool; + switch (params.flags & ams::svc::CreateProcessFlag_PoolPartitionMask) { + case ams::svc::CreateProcessFlag_PoolPartitionApplication: + pool = KMemoryManager::Pool_Application; + break; + case ams::svc::CreateProcessFlag_PoolPartitionApplet: + pool = KMemoryManager::Pool_Applet; + break; + case ams::svc::CreateProcessFlag_PoolPartitionSystem: + pool = KMemoryManager::Pool_System; + break; + case ams::svc::CreateProcessFlag_PoolPartitionSystemNonSecure: + default: + pool = KMemoryManager::Pool_SystemNonSecure; + break; + } + + /* Initialize the process. */ + R_TRY(process->Initialize(params, user_caps, num_caps, process_resource_limit, pool)); + + /* Register the process. */ + R_TRY(KProcess::Register(process)); + + /* Add the process to the handle table. */ + R_TRY(handle_table.Add(out, process)); + + return ResultSuccess(); + } + + template + Result CreateProcess(ams::svc::Handle *out, KUserPointer user_parameters, KUserPointer user_caps, int32_t num_caps) { + /* Read the parameters from user space. */ + T params; + R_TRY(user_parameters.CopyTo(std::addressof(params))); + + /* Invoke the implementation. */ + if constexpr (std::same_as) { + return CreateProcess(out, params, user_caps, num_caps); + } else { + /* Convert the parameters. 
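When the caller passed the 32-bit parameter layout, each field is copied into a native ams::svc::CreateProcessParameter before dispatching to the implementation above.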
*/ + ams::svc::CreateProcessParameter converted_params; + static_assert(sizeof(T{}.name) == sizeof(ams::svc::CreateProcessParameter{}.name)); + + std::memcpy(converted_params.name, params.name, sizeof(converted_params.name)); + converted_params.version = params.version; + converted_params.program_id = params.program_id; + converted_params.code_address = params.code_address; + converted_params.code_num_pages = params.code_num_pages; + converted_params.flags = params.flags; + converted_params.reslimit = params.reslimit; + converted_params.system_resource_num_pages = params.system_resource_num_pages; + + /* Invoke. */ + return CreateProcess(out, converted_params, user_caps, num_caps); + } + } + + Result StartProcess(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) { + /* Validate stack size. */ + R_UNLESS(main_thread_stack_size == static_cast(main_thread_stack_size), svc::ResultOutOfMemory()); + + /* Get the target process. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Validate the core id. */ + R_UNLESS(IsValidCoreId(core_id), svc::ResultInvalidCoreId()); + R_UNLESS(((1ul << core_id) & process->GetCoreMask()) != 0, svc::ResultInvalidCoreId()); + + /* Validate the priority. */ + R_UNLESS(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority, svc::ResultInvalidPriority()); + R_UNLESS(process->CheckThreadPriority(priority), svc::ResultInvalidPriority()); + + /* Set the process's ideal processor. */ + process->SetIdealCoreId(core_id); + + /* Run the process. */ + R_TRY(process->Run(priority, static_cast(main_thread_stack_size))); + + /* Open a reference to the process, since it's now running. */ + process->Open(); + + return ResultSuccess(); + } + + Result TerminateProcess(ams::svc::Handle process_handle) { + /* Get the target process. */ + KProcess *process = GetCurrentProcess().GetHandleTable().GetObject(process_handle).ReleasePointerUnsafe(); + R_UNLESS(process != nullptr, svc::ResultInvalidHandle()); + + if (process != GetCurrentProcessPointer()) { + /* We're terminating another process. Close our reference after terminating the process. */ + ON_SCOPE_EXIT { process->Close(); }; + + /* Terminate the process. */ + R_TRY(process->Terminate()); + } else { + /* We're terminating ourselves. Close our reference immediately. */ + process->Close(); + + /* Exit. */ + ExitProcess(); + } + + return ResultSuccess(); + } + + Result GetProcessInfo(int64_t *out, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) { + /* Get the target process. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the info. */ + switch (info_type) { + case ams::svc::ProcessInfoType_ProcessState: + { + /* Get the process's state. */ + KProcess::State state; + { + KScopedLightLock proc_lk(process->GetStateLock()); + KScopedSchedulerLock sl; + + state = process->GetState(); + } + + /* Convert to svc state. 
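The internal KProcess states map one-to-one onto the public ams::svc::ProcessState values; anything outside the enumerated set indicates a kernel bug, hence the unreachable default case below.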
*/ + switch (state) { + case KProcess::State_Created: *out = ams::svc::ProcessState_Created; break; + case KProcess::State_CreatedAttached: *out = ams::svc::ProcessState_CreatedAttached; break; + case KProcess::State_Running: *out = ams::svc::ProcessState_Running; break; + case KProcess::State_Crashed: *out = ams::svc::ProcessState_Crashed; break; + case KProcess::State_RunningAttached: *out = ams::svc::ProcessState_RunningAttached; break; + case KProcess::State_Terminating: *out = ams::svc::ProcessState_Terminating; break; + case KProcess::State_Terminated: *out = ams::svc::ProcessState_Terminated; break; + case KProcess::State_DebugBreak: *out = ams::svc::ProcessState_DebugBreak; break; + MESOSPHERE_UNREACHABLE_DEFAULT_CASE(); + } + } + break; + default: + return svc::ResultInvalidEnumValue(); + } + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ void ExitProcess64() { - MESOSPHERE_PANIC("Stubbed SvcExitProcess64 was called."); + return ExitProcess(); } Result GetProcessId64(uint64_t *out_process_id, ams::svc::Handle process_handle) { @@ -62,29 +361,29 @@ namespace ams::kern::svc { } Result GetProcessList64(int32_t *out_num_processes, KUserPointer out_process_ids, int32_t max_out_count) { - MESOSPHERE_PANIC("Stubbed SvcGetProcessList64 was called."); + return GetProcessList(out_num_processes, out_process_ids, max_out_count); } Result CreateProcess64(ams::svc::Handle *out_handle, KUserPointer parameters, KUserPointer caps, int32_t num_caps) { - MESOSPHERE_PANIC("Stubbed SvcCreateProcess64 was called."); + return CreateProcess(out_handle, parameters, caps, num_caps); } Result StartProcess64(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) { - MESOSPHERE_PANIC("Stubbed SvcStartProcess64 was called."); + return StartProcess(process_handle, priority, core_id, main_thread_stack_size); } Result TerminateProcess64(ams::svc::Handle process_handle) { - MESOSPHERE_PANIC("Stubbed SvcTerminateProcess64 was called."); + return TerminateProcess(process_handle); } Result GetProcessInfo64(int64_t *out_info, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) { - MESOSPHERE_PANIC("Stubbed SvcGetProcessInfo64 was called."); + return GetProcessInfo(out_info, process_handle, info_type); } /* ============================= 64From32 ABI ============================= */ void ExitProcess64From32() { - MESOSPHERE_PANIC("Stubbed SvcExitProcess64From32 was called."); + return ExitProcess(); } Result GetProcessId64From32(uint64_t *out_process_id, ams::svc::Handle process_handle) { @@ -92,23 +391,23 @@ namespace ams::kern::svc { } Result GetProcessList64From32(int32_t *out_num_processes, KUserPointer out_process_ids, int32_t max_out_count) { - MESOSPHERE_PANIC("Stubbed SvcGetProcessList64From32 was called."); + return GetProcessList(out_num_processes, out_process_ids, max_out_count); } Result CreateProcess64From32(ams::svc::Handle *out_handle, KUserPointer parameters, KUserPointer caps, int32_t num_caps) { - MESOSPHERE_PANIC("Stubbed SvcCreateProcess64From32 was called."); + return CreateProcess(out_handle, parameters, caps, num_caps); } Result StartProcess64From32(ams::svc::Handle process_handle, int32_t priority, int32_t core_id, uint64_t main_thread_stack_size) { - MESOSPHERE_PANIC("Stubbed SvcStartProcess64From32 was called."); + return StartProcess(process_handle, priority, core_id, main_thread_stack_size); } Result TerminateProcess64From32(ams::svc::Handle process_handle) { - 
MESOSPHERE_PANIC("Stubbed SvcTerminateProcess64From32 was called."); + return TerminateProcess(process_handle); } Result GetProcessInfo64From32(int64_t *out_info, ams::svc::Handle process_handle, ams::svc::ProcessInfoType info_type) { - MESOSPHERE_PANIC("Stubbed SvcGetProcessInfo64From32 was called."); + return GetProcessInfo(out_info, process_handle, info_type); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp index 2e6348493..bd8f8cb0c 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp @@ -21,52 +21,224 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidProcessMemoryPermission(ams::svc::MemoryPermission perm) { + switch (perm) { + case ams::svc::MemoryPermission_None: + case ams::svc::MemoryPermission_Read: + case ams::svc::MemoryPermission_ReadWrite: + case ams::svc::MemoryPermission_ReadExecute: + return true; + default: + return false; + } + } + Result SetProcessMemoryPermission(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) { + /* Validate the address/size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(address == static_cast(address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(size == static_cast(size), svc::ResultInvalidCurrentMemory()); + + /* Validate the memory permission. */ + R_UNLESS(IsValidProcessMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission()); + + /* Get the process from its handle. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Validate that the address is in range. */ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Set the memory permission. */ + return page_table.SetProcessMemoryPermission(address, size, perm); + } + + Result MapProcessMemory(uintptr_t dst_address, ams::svc::Handle process_handle, uint64_t src_address, size_t size) { + /* Validate the address/size. */ + R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(src_address == static_cast(src_address), svc::ResultInvalidCurrentMemory()); + + /* Get the processes. */ + KProcess *dst_process = GetCurrentProcessPointer(); + KScopedAutoObject src_process = dst_process->GetHandleTable().GetObjectWithoutPseudoHandle(process_handle); + R_UNLESS(src_process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the page tables. */ + auto &dst_pt = dst_process->GetPageTable(); + auto &src_pt = src_process->GetPageTable(); + + /* Validate that the mapping is in range. 
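The source range must already be mapped in the process named by the handle (Contains), and the destination must be placeable in the calling process as KMemoryState_SharedCode (CanContain).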
*/ + R_UNLESS(src_pt.Contains(src_address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState_SharedCode), svc::ResultInvalidMemoryRegion()); + + /* Create a new page group. */ + KPageGroup pg(dst_pt.GetBlockInfoManager()); + + /* Make the page group. */ + R_TRY(src_pt.MakeAndOpenPageGroup(std::addressof(pg), + src_address, size / PageSize, + KMemoryState_FlagCanMapProcess, KMemoryState_FlagCanMapProcess, + KMemoryPermission_None, KMemoryPermission_None, + KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Close the page group when we're done. */ + ON_SCOPE_EXIT { pg.Close(); }; + + /* Map the group. */ + R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState_SharedCode, KMemoryPermission_UserReadWrite)); + + return ResultSuccess(); + } + + Result UnmapProcessMemory(uintptr_t dst_address, ams::svc::Handle process_handle, uint64_t src_address, size_t size) { + /* Validate the address/size. */ + R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(src_address == static_cast(src_address), svc::ResultInvalidCurrentMemory()); + + /* Get the processes. */ + KProcess *dst_process = GetCurrentProcessPointer(); + KScopedAutoObject src_process = dst_process->GetHandleTable().GetObjectWithoutPseudoHandle(process_handle); + R_UNLESS(src_process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the page tables. */ + auto &dst_pt = dst_process->GetPageTable(); + auto &src_pt = src_process->GetPageTable(); + + /* Validate that the mapping is in range. */ + R_UNLESS(src_pt.Contains(src_address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState_SharedCode), svc::ResultInvalidMemoryRegion()); + + /* Create a new page group. */ + KPageGroup pg(dst_pt.GetBlockInfoManager()); + + /* Make the page group. */ + R_TRY(src_pt.MakeAndOpenPageGroup(std::addressof(pg), + src_address, size / PageSize, + KMemoryState_FlagCanMapProcess, KMemoryState_FlagCanMapProcess, + KMemoryPermission_None, KMemoryPermission_None, + KMemoryAttribute_All, KMemoryAttribute_None)); + + /* Close the page group when we're done. */ + ON_SCOPE_EXIT { pg.Close(); }; + + /* Unmap the group. */ + R_TRY(dst_pt.UnmapPageGroup(dst_address, pg, KMemoryState_SharedCode)); + + return ResultSuccess(); + } + + Result MapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) { + /* Validate the address/size. 
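Because the addresses and size arrive as raw 64-bit values, they must also survive the narrowing round-trip casts checked below.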
*/ + R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(src_address == static_cast(src_address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_address == static_cast(dst_address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(size == static_cast(size), svc::ResultInvalidCurrentMemory()); + + /* Get the process from its handle. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Validate that the mapping is in range. */ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidCurrentMemory()); + + /* Map the memory. */ + R_TRY(page_table.MapCodeMemory(dst_address, src_address, size)); + + return ResultSuccess(); + } + + Result UnmapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) { + /* Validate the address/size. */ + R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(src_address == static_cast(src_address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(dst_address == static_cast(dst_address), svc::ResultInvalidCurrentMemory()); + R_UNLESS(size == static_cast(size), svc::ResultInvalidCurrentMemory()); + + /* Get the process from its handle. */ + KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle(process_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Validate that the mapping is in range. */ + auto &page_table = process->GetPageTable(); + R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory()); + R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidCurrentMemory()); + + /* Unmap the memory. 
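As with the map path above, the page table performs the actual validation and teardown of the AliasCode mapping.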
*/ + R_TRY(page_table.UnmapCodeMemory(dst_address, src_address, size)); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result SetProcessMemoryPermission64(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) { - MESOSPHERE_PANIC("Stubbed SvcSetProcessMemoryPermission64 was called."); + return SetProcessMemoryPermission(process_handle, address, size, perm); } Result MapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapProcessMemory64 was called."); + return MapProcessMemory(dst_address, process_handle, src_address, size); } Result UnmapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapProcessMemory64 was called."); + return UnmapProcessMemory(dst_address, process_handle, src_address, size); } Result MapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcMapProcessCodeMemory64 was called."); + return MapProcessCodeMemory(process_handle, dst_address, src_address, size); } Result UnmapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapProcessCodeMemory64 was called."); + return UnmapProcessCodeMemory(process_handle, dst_address, src_address, size); } /* ============================= 64From32 ABI ============================= */ Result SetProcessMemoryPermission64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) { - MESOSPHERE_PANIC("Stubbed SvcSetProcessMemoryPermission64From32 was called."); + return SetProcessMemoryPermission(process_handle, address, size, perm); } Result MapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcMapProcessMemory64From32 was called."); + return MapProcessMemory(dst_address, process_handle, src_address, size); } Result UnmapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapProcessMemory64From32 was called."); + return UnmapProcessMemory(dst_address, process_handle, src_address, size); } Result MapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcMapProcessCodeMemory64From32 was called."); + return MapProcessCodeMemory(process_handle, dst_address, src_address, size); } Result UnmapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapProcessCodeMemory64From32 was called."); + return UnmapProcessCodeMemory(process_handle, dst_address, src_address, size); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_processor.cpp b/libraries/libmesosphere/source/svc/kern_svc_processor.cpp index c89185f51..61e25b72b 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_processor.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_processor.cpp @@ -21,20 +21,22 @@ namespace ams::kern::svc { namespace { - + int32_t GetCurrentProcessorNumber() { + return GetCurrentCoreId(); + } } 
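/* Note: every SVC touched by this change follows the same shape. A single implementation lives in the
   file's anonymous namespace, and the 64 / 64From32 ABI entry points below are thin forwarders to it,
   replacing the previous MESOSPHERE_PANIC stubs. A minimal sketch of that pattern, using hypothetical
   names purely for illustration:

       #include <cstdint>

       namespace {
           int32_t GetAnswer() { return 42; }                  // shared implementation
       }

       int32_t GetAnswer64()       { return GetAnswer(); }     // 64-bit ABI entry point
       int32_t GetAnswer64From32() { return GetAnswer(); }     // 32-bit-process ABI entry point
*/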
/* ============================= 64 ABI ============================= */ int32_t GetCurrentProcessorNumber64() { - MESOSPHERE_PANIC("Stubbed SvcGetCurrentProcessorNumber64 was called."); + return GetCurrentProcessorNumber(); } /* ============================= 64From32 ABI ============================= */ int32_t GetCurrentProcessorNumber64From32() { - MESOSPHERE_PANIC("Stubbed SvcGetCurrentProcessorNumber64From32 was called."); + return GetCurrentProcessorNumber(); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp index cf1480ef4..a40c5b4a2 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp @@ -22,8 +22,6 @@ namespace ams::kern::svc { namespace { Result QueryProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uintptr_t address) { - MESOSPHERE_LOG("%s: QueryProcessMemory(0x%08x, 0x%zx) was called\n", GetCurrentProcess().GetName(), process_handle, address); - /* Get the process. */ KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject(process_handle); R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); @@ -37,7 +35,39 @@ namespace ams::kern::svc { return ResultSuccess(); } - Result QueryMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, uintptr_t address) { + template + Result QueryProcessMemory(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) { + /* Get an ams::svc::MemoryInfo for the region. */ + ams::svc::MemoryInfo info = {}; + R_TRY(QueryProcessMemory(std::addressof(info), out_page_info, process_handle, address)); + + /* Copy the info to userspace. */ + if constexpr (std::same_as) { + R_TRY(out_memory_info.CopyFrom(std::addressof(info))); + } else { + /* Convert the info. */ + T converted_info = {}; + static_assert(std::same_as); + static_assert(std::same_as); + + converted_info.addr = info.addr; + converted_info.size = info.size; + converted_info.state = info.state; + converted_info.attr = info.attr; + converted_info.perm = info.perm; + converted_info.ipc_refcount = info.ipc_refcount; + converted_info.device_refcount = info.device_refcount; + + /* Copy it. */ + R_TRY(out_memory_info.CopyFrom(std::addressof(converted_info))); + } + + return ResultSuccess(); + } + + + template + Result QueryMemory(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, uintptr_t address) { /* Query memory is just QueryProcessMemory on the current process. */ return QueryProcessMemory(out_memory_info, out_page_info, ams::svc::PseudoHandle::CurrentProcess, address); } @@ -47,29 +77,21 @@ namespace ams::kern::svc { /* ============================= 64 ABI ============================= */ Result QueryMemory64(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) { - /* Get an ams::svc::MemoryInfo for the region. */ - ams::svc::MemoryInfo info = {}; - R_TRY(QueryMemory(std::addressof(info), out_page_info, address)); - - /* Try to copy to userspace. In the 64-bit case, ams::svc::lp64::MemoryInfo is the same as ams::svc::MemoryInfo. 
*/ - static_assert(sizeof(ams::svc::MemoryInfo) == sizeof(ams::svc::lp64::MemoryInfo)); - R_TRY(out_memory_info.CopyFrom(std::addressof(info))); - - return ResultSuccess(); + return QueryMemory(out_memory_info, out_page_info, address); } Result QueryProcessMemory64(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) { - MESOSPHERE_PANIC("Stubbed SvcQueryProcessMemory64 was called."); + return QueryProcessMemory(out_memory_info, out_page_info, process_handle, address); } /* ============================= 64From32 ABI ============================= */ Result QueryMemory64From32(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) { - MESOSPHERE_PANIC("Stubbed SvcQueryMemory64From32 was called."); + return QueryMemory(out_memory_info, out_page_info, address); } Result QueryProcessMemory64From32(KUserPointer out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) { - MESOSPHERE_PANIC("Stubbed SvcQueryProcessMemory64From32 was called."); + return QueryProcessMemory(out_memory_info, out_page_info, process_handle, address); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_register.cpp b/libraries/libmesosphere/source/svc/kern_svc_register.cpp index fedf164b1..261330a15 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_register.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_register.cpp @@ -21,20 +21,26 @@ namespace ams::kern::svc { namespace { + Result ReadWriteRegister(uint32_t *out, ams::svc::PhysicalAddress address, uint32_t mask, uint32_t value) { + /* Clear the output unconditionally. */ + *out = 0; + /* Read/write the register. */ + return KSystemControl::ReadWriteRegister(out, address, mask, value); + } } /* ============================= 64 ABI ============================= */ Result ReadWriteRegister64(uint32_t *out_value, ams::svc::PhysicalAddress address, uint32_t mask, uint32_t value) { - MESOSPHERE_PANIC("Stubbed SvcReadWriteRegister64 was called."); + return ReadWriteRegister(out_value, address, mask, value); } /* ============================= 64From32 ABI ============================= */ Result ReadWriteRegister64From32(uint32_t *out_value, ams::svc::PhysicalAddress address, uint32_t mask, uint32_t value) { - MESOSPHERE_PANIC("Stubbed SvcReadWriteRegister64From32 was called."); + return ReadWriteRegister(out_value, address, mask, value); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp b/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp index af51a61b1..de3bdb0c6 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp @@ -21,44 +21,108 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidLimitableResource(ams::svc::LimitableResource which) { + return which < ams::svc::LimitableResource_Count; + } + Result GetResourceLimitLimitValue(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) { + /* Validate the resource. */ + R_UNLESS(IsValidLimitableResource(which), svc::ResultInvalidEnumValue()); + + /* Get the resource limit. */ + KScopedAutoObject resource_limit = GetCurrentProcess().GetHandleTable().GetObject(resource_limit_handle); + R_UNLESS(resource_limit.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the limit value. 
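This is the configured ceiling for the resource; current consumption against that ceiling is reported by GetResourceLimitCurrentValue below.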
*/ + *out_limit_value = resource_limit->GetLimitValue(which); + + return ResultSuccess(); + } + + Result GetResourceLimitCurrentValue(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) { + /* Validate the resource. */ + R_UNLESS(IsValidLimitableResource(which), svc::ResultInvalidEnumValue()); + + /* Get the resource limit. */ + KScopedAutoObject resource_limit = GetCurrentProcess().GetHandleTable().GetObject(resource_limit_handle); + R_UNLESS(resource_limit.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the current value. */ + *out_current_value = resource_limit->GetCurrentValue(which); + + return ResultSuccess(); + } + + Result CreateResourceLimit(ams::svc::Handle *out_handle) { + /* Create a new resource limit. */ + KResourceLimit *resource_limit = KResourceLimit::Create(); + R_UNLESS(resource_limit != nullptr, svc::ResultOutOfResource()); + + /* Ensure we don't leak a reference to the limit. */ + ON_SCOPE_EXIT { resource_limit->Close(); }; + + /* Initialize the resource limit. */ + resource_limit->Initialize(); + + /* Try to register the limit. */ + R_TRY(KResourceLimit::Register(resource_limit)); + + /* Add the limit to the handle table. */ + R_TRY(GetCurrentProcess().GetHandleTable().Add(out_handle, resource_limit)); + + return ResultSuccess(); + } + + Result SetResourceLimitLimitValue(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) { + /* Validate the resource. */ + R_UNLESS(IsValidLimitableResource(which), svc::ResultInvalidEnumValue()); + + /* Get the resource limit. */ + KScopedAutoObject resource_limit = GetCurrentProcess().GetHandleTable().GetObject(resource_limit_handle); + R_UNLESS(resource_limit.IsNotNull(), svc::ResultInvalidHandle()); + + /* Set the limit value. 
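SetLimitValue can fail (notably, a limit cannot be lowered below the value currently in use), hence the R_TRY rather than a plain assignment.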
*/ + R_TRY(resource_limit->SetLimitValue(which, limit_value)); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result GetResourceLimitLimitValue64(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) { - MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitLimitValue64 was called."); + return GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which); } Result GetResourceLimitCurrentValue64(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) { - MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitCurrentValue64 was called."); + return GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which); } Result CreateResourceLimit64(ams::svc::Handle *out_handle) { - MESOSPHERE_PANIC("Stubbed SvcCreateResourceLimit64 was called."); + return CreateResourceLimit(out_handle); } Result SetResourceLimitLimitValue64(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) { - MESOSPHERE_PANIC("Stubbed SvcSetResourceLimitLimitValue64 was called."); + return SetResourceLimitLimitValue(resource_limit_handle, which, limit_value); } /* ============================= 64From32 ABI ============================= */ Result GetResourceLimitLimitValue64From32(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) { - MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitLimitValue64From32 was called."); + return GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which); } Result GetResourceLimitCurrentValue64From32(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) { - MESOSPHERE_PANIC("Stubbed SvcGetResourceLimitCurrentValue64From32 was called."); + return GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which); } Result CreateResourceLimit64From32(ams::svc::Handle *out_handle) { - MESOSPHERE_PANIC("Stubbed SvcCreateResourceLimit64From32 was called."); + return CreateResourceLimit(out_handle); } Result SetResourceLimitLimitValue64From32(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) { - MESOSPHERE_PANIC("Stubbed SvcSetResourceLimitLimitValue64From32 was called."); + return SetResourceLimitLimitValue(resource_limit_handle, which, limit_value); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_secure_monitor_call.cpp b/libraries/libmesosphere/source/svc/kern_svc_secure_monitor_call.cpp index 8c0034ff6..101c884b1 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_secure_monitor_call.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_secure_monitor_call.cpp @@ -33,8 +33,6 @@ namespace ams::kern::svc { /* ============================= 64From32 ABI ============================= */ - void CallSecureMonitor64From32(ams::svc::ilp32::SecureMonitorArguments *args) { - MESOSPHERE_PANIC("Stubbed SvcCallSecureMonitor64From32 was called."); - } + /* CallSecureMonitor64From32 is not supported. 
*/ } diff --git a/libraries/libmesosphere/source/svc/kern_svc_session.cpp b/libraries/libmesosphere/source/svc/kern_svc_session.cpp index 36e540c0b..8b70dd29c 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_session.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_session.cpp @@ -21,28 +21,108 @@ namespace ams::kern::svc { namespace { + template + Result CreateSession(ams::svc::Handle *out_server, ams::svc::Handle *out_client, uintptr_t name) { + /* Get the current process and handle table. */ + auto &process = GetCurrentProcess(); + auto &handle_table = process.GetHandleTable(); + /* Reserve a new session from the process resource limit. */ + KScopedResourceReservation session_reservation(std::addressof(process), ams::svc::LimitableResource_SessionCountMax); + R_UNLESS(session_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Create a new session. */ + T *session = T::Create(); + R_UNLESS(session != nullptr, svc::ResultOutOfResource()); + + /* Initialize the session. */ + session->Initialize(nullptr, name); + + /* Commit the session reservation. */ + session_reservation.Commit(); + + /* Ensure that we clean up the session (and its only references are handle table) on function end. */ + ON_SCOPE_EXIT { + session->GetServerSession().Close(); + session->GetClientSession().Close(); + }; + + /* Register the session. */ + R_TRY(T::Register(session)); + + /* Add the server session to the handle table. */ + R_TRY(handle_table.Add(out_server, std::addressof(session->GetServerSession()))); + + /* Ensure that we maintaing a clean handle state on exit. */ + auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_server); }; + + /* Add the client session to the handle table. */ + R_TRY(handle_table.Add(out_client, std::addressof(session->GetClientSession()))); + + /* We succeeded! */ + handle_guard.Cancel(); + return ResultSuccess(); + } + + Result CreateSession(ams::svc::Handle *out_server, ams::svc::Handle *out_client, bool is_light, uintptr_t name) { + if (is_light) { + return CreateSession(out_server, out_client, name); + } else { + return CreateSession(out_server, out_client, name); + } + } + + Result AcceptSession(ams::svc::Handle *out, ams::svc::Handle port_handle) { + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Get the server port. */ + KScopedAutoObject port = handle_table.GetObject(port_handle); + R_UNLESS(port.IsNotNull(), svc::ResultInvalidHandle()); + + /* Reserve an entry for the new session. */ + R_TRY(handle_table.Reserve(out)); + auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); }; + + /* Accept the session. */ + KAutoObject *session; + if (port->IsLight()) { + session = port->AcceptLightSession(); + } else { + session = port->AcceptSession(); + } + + /* Ensure we accepted successfully. */ + R_UNLESS(session != nullptr, svc::ResultNotFound()); + + /* Register the session. 
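Registration consumes the handle slot reserved above; the extra reference returned by the accept call is then dropped, so the handle table ends up holding the only reference to the new session.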
*/ + handle_table.Register(*out, session); + handle_guard.Cancel(); + session->Close(); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result CreateSession64(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) { - MESOSPHERE_PANIC("Stubbed SvcCreateSession64 was called."); + return CreateSession(out_server_session_handle, out_client_session_handle, is_light, name); } Result AcceptSession64(ams::svc::Handle *out_handle, ams::svc::Handle port) { - MESOSPHERE_PANIC("Stubbed SvcAcceptSession64 was called."); + return AcceptSession(out_handle, port); } /* ============================= 64From32 ABI ============================= */ Result CreateSession64From32(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) { - MESOSPHERE_PANIC("Stubbed SvcCreateSession64From32 was called."); + return CreateSession(out_server_session_handle, out_client_session_handle, is_light, name); } Result AcceptSession64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) { - MESOSPHERE_PANIC("Stubbed SvcAcceptSession64From32 was called."); + return AcceptSession(out_handle, port); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_shared_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_shared_memory.cpp index fbd77c023..91326a975 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_shared_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_shared_memory.cpp @@ -21,36 +21,138 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidSharedMemoryPermission(ams::svc::MemoryPermission perm) { + switch (perm) { + case ams::svc::MemoryPermission_Read: + case ams::svc::MemoryPermission_ReadWrite: + return true; + default: + return false; + } + } + constexpr bool IsValidRemoteSharedMemoryPermission(ams::svc::MemoryPermission perm) { + return IsValidSharedMemoryPermission(perm) || perm == ams::svc::MemoryPermission_DontCare; + } + + Result MapSharedMemory(ams::svc::Handle shmem_handle, uintptr_t address, size_t size, ams::svc::MemoryPermission map_perm) { + /* Validate the address/size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Validate the permission. */ + R_UNLESS(IsValidSharedMemoryPermission(map_perm), svc::ResultInvalidNewMemoryPermission()); + + /* Get the current process. */ + auto &process = GetCurrentProcess(); + auto &page_table = process.GetPageTable(); + + /* Get the shared memory. */ + KScopedAutoObject shmem = process.GetHandleTable().GetObject(shmem_handle); + R_UNLESS(shmem.IsNotNull(), svc::ResultInvalidHandle()); + + /* Verify that the mapping is in range. */ + R_UNLESS(page_table.CanContain(address, size, KMemoryState_Shared), svc::ResultInvalidMemoryRegion()); + + /* Add the shared memory to the process. */ + R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size)); + + /* Ensure that we clean up the shared memory if we fail to map it. */ + auto guard = SCOPE_GUARD { process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); }; + + /* Map the shared memory. */ + R_TRY(shmem->Map(std::addressof(page_table), address, size, std::addressof(process), map_perm)); + + /* We succeeded. 
*/ + guard.Cancel(); + return ResultSuccess(); + } + + Result UnmapSharedMemory(ams::svc::Handle shmem_handle, uintptr_t address, size_t size) { + /* Validate the address/size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Get the current process. */ + auto &process = GetCurrentProcess(); + auto &page_table = process.GetPageTable(); + + /* Get the shared memory. */ + KScopedAutoObject shmem = process.GetHandleTable().GetObject(shmem_handle); + R_UNLESS(shmem.IsNotNull(), svc::ResultInvalidHandle()); + + /* Verify that the mapping is in range. */ + R_UNLESS(page_table.CanContain(address, size, KMemoryState_Shared), svc::ResultInvalidMemoryRegion()); + + /* Unmap the shared memory. */ + R_TRY(shmem->Unmap(std::addressof(page_table), address, size, std::addressof(process))); + + /* Remove the shared memory from the process. */ + process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); + + return ResultSuccess(); + } + + Result CreateSharedMemory(ams::svc::Handle *out, size_t size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) { + /* Validate the size. */ + R_UNLESS(0 < size && size < kern::MainMemorySize, svc::ResultInvalidSize()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + + /* Validate the permissions. */ + R_UNLESS(IsValidSharedMemoryPermission(owner_perm), svc::ResultInvalidNewMemoryPermission()); + R_UNLESS(IsValidRemoteSharedMemoryPermission(remote_perm), svc::ResultInvalidNewMemoryPermission()); + + /* Create the shared memory. */ + KSharedMemory *shmem = KSharedMemory::Create(); + R_UNLESS(shmem != nullptr, svc::ResultOutOfResource()); + + /* Ensure the only reference is in the handle table when we're done. */ + ON_SCOPE_EXIT { shmem->Close(); }; + + /* Initialize the shared memory. */ + R_TRY(shmem->Initialize(GetCurrentProcessPointer(), size, owner_perm, remote_perm)); + + /* Register the shared memory. */ + R_TRY(KSharedMemory::Register(shmem)); + + /* Add the shared memory to the handle table. 
*/ + R_TRY(GetCurrentProcess().GetHandleTable().Add(out, shmem)); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result MapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapSharedMemory64 was called."); + return MapSharedMemory(shmem_handle, address, size, map_perm); } Result UnmapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapSharedMemory64 was called."); + return UnmapSharedMemory(shmem_handle, address, size); } Result CreateSharedMemory64(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) { - MESOSPHERE_PANIC("Stubbed SvcCreateSharedMemory64 was called."); + return CreateSharedMemory(out_handle, size, owner_perm, remote_perm); } /* ============================= 64From32 ABI ============================= */ Result MapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapSharedMemory64From32 was called."); + return MapSharedMemory(shmem_handle, address, size, map_perm); } Result UnmapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapSharedMemory64From32 was called."); + return UnmapSharedMemory(shmem_handle, address, size); } Result CreateSharedMemory64From32(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) { - MESOSPHERE_PANIC("Stubbed SvcCreateSharedMemory64From32 was called."); + return CreateSharedMemory(out_handle, size, owner_perm, remote_perm); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp b/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp index 7564da89a..1db753eea 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_synchronization.cpp @@ -21,52 +21,156 @@ namespace ams::kern::svc { namespace { + Result CloseHandle(ams::svc::Handle handle) { + /* Remove the handle. */ + R_UNLESS(GetCurrentProcess().GetHandleTable().Remove(handle), svc::ResultInvalidHandle()); + return ResultSuccess(); + } + Result ResetSignal(ams::svc::Handle handle) { + /* Get the current handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Try to reset as readable event. */ + { + KScopedAutoObject readable_event = handle_table.GetObject(handle); + if (readable_event.IsNotNull()) { + return readable_event->Reset(); + } + } + + /* Try to reset as process. */ + { + KScopedAutoObject process = handle_table.GetObject(handle); + if (process.IsNotNull()) { + return process->Reset(); + } + } + + return svc::ResultInvalidHandle(); + } + + Result WaitSynchronizationImpl(int32_t *out_index, KSynchronizationObject **objs, int32_t num_handles, int64_t timeout_ns) { + /* Convert the timeout from nanoseconds to ticks. 
*/ + s64 timeout; + if (timeout_ns > 0) { + u64 ticks = KHardwareTimer::GetTick(); + ticks += ams::svc::Tick(TimeSpan::FromNanoSeconds(timeout_ns)); + ticks += 2; + + timeout = ticks; + } else { + timeout = timeout_ns; + } + + return Kernel::GetSynchronization().Wait(out_index, objs, num_handles, timeout); + } + + Result WaitSynchronization(int32_t *out_index, KUserPointer user_handles, int32_t num_handles, int64_t timeout_ns) { + /* Ensure number of handles is valid. */ + R_UNLESS(0 <= num_handles && num_handles <= ams::svc::ArgumentHandleCountMax, svc::ResultOutOfRange()); + + /* Get the synchronization context. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + KSynchronizationObject **objs = GetCurrentThread().GetSynchronizationObjectBuffer(); + ams::svc::Handle *handles = GetCurrentThread().GetHandleBuffer(); + + /* Copy user handles. */ + if (num_handles > 0) { + /* Ensure that we can try to get the handles. */ + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(user_handles.GetUnsafePointer()), num_handles * sizeof(ams::svc::Handle)), svc::ResultInvalidPointer()); + + /* Get the handles. */ + R_TRY(user_handles.CopyArrayTo(handles, num_handles)); + + /* Convert the handles to objects. */ + R_UNLESS(handle_table.GetMultipleObjects(objs, handles, num_handles), svc::ResultInvalidHandle()); + } + + /* Ensure handles are closed when we're done. */ + ON_SCOPE_EXIT { + for (auto i = 0; i < num_handles; ++i) { + objs[i]->Close(); + } + }; + + /* Wait on the objects. */ + R_TRY_CATCH(WaitSynchronizationImpl(out_index, objs, num_handles, timeout_ns)) { + R_CONVERT(svc::ResultSessionClosed, ResultSuccess()) + } R_END_TRY_CATCH; + + return ResultSuccess(); + } + + Result CancelSynchronization(ams::svc::Handle handle) { + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Cancel the thread's wait. */ + thread->WaitCancel(); + return ResultSuccess(); + } + + void SynchronizePreemptionState() { + /* Lock the scheduler. */ + KScopedSchedulerLock sl; + + /* If the current thread is pinned, unpin it. */ + KProcess *cur_process = GetCurrentProcessPointer(); + if (cur_process->GetPinnedThread(GetCurrentCoreId()) == GetCurrentThreadPointer()) { + /* Clear the current thread's interrupt flag. */ + GetCurrentThread().ClearInterruptFlag(); + + /* Unpin the current thread. 
*/ + KScheduler::UnpinCurrentThread(cur_process); + } + } } /* ============================= 64 ABI ============================= */ Result CloseHandle64(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcCloseHandle64 was called."); + return CloseHandle(handle); } Result ResetSignal64(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcResetSignal64 was called."); + return ResetSignal(handle); } - Result WaitSynchronization64(int32_t *out_index, KUserPointer handles, int32_t numHandles, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcWaitSynchronization64 was called."); + Result WaitSynchronization64(int32_t *out_index, KUserPointer handles, int32_t num_handles, int64_t timeout_ns) { + return WaitSynchronization(out_index, handles, num_handles, timeout_ns); } Result CancelSynchronization64(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcCancelSynchronization64 was called."); + return CancelSynchronization(handle); } void SynchronizePreemptionState64() { - MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64 was called."); + return SynchronizePreemptionState(); } /* ============================= 64From32 ABI ============================= */ Result CloseHandle64From32(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcCloseHandle64From32 was called."); + return CloseHandle(handle); } Result ResetSignal64From32(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcResetSignal64From32 was called."); + return ResetSignal(handle); } - Result WaitSynchronization64From32(int32_t *out_index, KUserPointer handles, int32_t numHandles, int64_t timeout_ns) { - MESOSPHERE_PANIC("Stubbed SvcWaitSynchronization64From32 was called."); + Result WaitSynchronization64From32(int32_t *out_index, KUserPointer handles, int32_t num_handles, int64_t timeout_ns) { + return WaitSynchronization(out_index, handles, num_handles, timeout_ns); } Result CancelSynchronization64From32(ams::svc::Handle handle) { - MESOSPHERE_PANIC("Stubbed SvcCancelSynchronization64From32 was called."); + return CancelSynchronization(handle); } void SynchronizePreemptionState64From32() { - MESOSPHERE_PANIC("Stubbed SvcSynchronizePreemptionState64From32 was called."); + return SynchronizePreemptionState(); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_thread.cpp b/libraries/libmesosphere/source/svc/kern_svc_thread.cpp index 5c25342ee..7bd9dfd1d 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_thread.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_thread.cpp @@ -21,116 +21,329 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidCoreId(int32_t core_id) { + return (0 <= core_id && core_id < static_cast(cpu::NumCores)); + } + Result CreateThread(ams::svc::Handle *out, ams::svc::ThreadFunc f, uintptr_t arg, uintptr_t stack_bottom, int32_t priority, int32_t core_id) { + /* Adjust core id, if it's the default magic. */ + KProcess &process = GetCurrentProcess(); + if (core_id == ams::svc::IdealCoreUseProcessValue) { + core_id = process.GetIdealCoreId(); + } + + /* Validate arguments. */ + R_UNLESS(IsValidCoreId(core_id), svc::ResultInvalidCoreId()); + R_UNLESS(((1ul << core_id) & process.GetCoreMask()) != 0, svc::ResultInvalidCoreId()); + + R_UNLESS(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority, svc::ResultInvalidPriority()); + R_UNLESS(process.CheckThreadPriority(priority), svc::ResultInvalidPriority()); + + /* Reserve a new thread from the process resource limit (waiting up to 100ms). 
*/ + KScopedResourceReservation thread_reservation(std::addressof(process), ams::svc::LimitableResource_ThreadCountMax, 1, KHardwareTimer::GetTick() + ams::svc::Tick(TimeSpan::FromMilliSeconds(100))); + R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Create the thread. */ + KThread *thread = KThread::Create(); + R_UNLESS(thread != nullptr, svc::ResultOutOfResource()); + ON_SCOPE_EXIT { thread->Close(); }; + + /* Initialize the thread. */ + { + KScopedLightLock lk(process.GetStateLock()); + R_TRY(KThread::InitializeUserThread(thread, reinterpret_cast(static_cast(f)), arg, stack_bottom, priority, core_id, std::addressof(process))); + } + + /* Commit the thread reservation. */ + thread_reservation.Commit(); + + /* Clone the current fpu status to the new thread. */ + thread->GetContext().CloneFpuStatus(); + + /* Register the new thread. */ + R_TRY(KThread::Register(thread)); + + /* Add the thread to the handle table. */ + R_TRY(process.GetHandleTable().Add(out, thread)); + + return ResultSuccess(); + } + + Result StartThread(ams::svc::Handle thread_handle) { + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Try to start the thread. */ + R_TRY(thread->Run()); + + /* If we succeeded, persist a reference to the thread. */ + thread->Open(); + return ResultSuccess(); + } + + void ExitThread() { + GetCurrentThread().Exit(); + MESOSPHERE_PANIC("Thread survived call to exit"); + } + + void SleepThread(int64_t ns) { + /* When the input tick is positive, sleep. */ + if (AMS_LIKELY(ns > 0)) { + /* Convert the timeout from nanoseconds to ticks. */ + /* NOTE: Nintendo does not use this conversion logic in WaitSynchronization... */ + s64 timeout; + + const ams::svc::Tick offset_tick(TimeSpan::FromNanoSeconds(ns)); + if (AMS_LIKELY(offset_tick > 0)) { + timeout = KHardwareTimer::GetTick() + offset_tick + 2; + if (AMS_UNLIKELY(timeout <= 0)) { + timeout = std::numeric_limits::max(); + } + } else { + timeout = std::numeric_limits::max(); + } + + /* Sleep. */ + /* NOTE: Nintendo does not check the result of this sleep. */ + GetCurrentThread().Sleep(timeout); + } else if (ns == ams::svc::YieldType_WithoutCoreMigration) { + KScheduler::YieldWithoutCoreMigration(); + } else if (ns == ams::svc::YieldType_WithCoreMigration) { + KScheduler::YieldWithCoreMigration(); + } else if (ns == ams::svc::YieldType_ToAnyThread) { + KScheduler::YieldToAnyThread(); + } else { + /* Nintendo does nothing at all if an otherwise invalid value is passed. */ + } + } + + Result GetThreadPriority(int32_t *out_priority, ams::svc::Handle thread_handle) { + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the thread's priority. */ + *out_priority = thread->GetPriority(); + return ResultSuccess(); + } + + Result SetThreadPriority(ams::svc::Handle thread_handle, int32_t priority) { + /* Get the current process. */ + KProcess &process = GetCurrentProcess(); + + /* Validate the priority. */ + R_UNLESS(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority, svc::ResultInvalidPriority()); + R_UNLESS(process.CheckThreadPriority(priority), svc::ResultInvalidPriority()); + + /* Get the thread from its handle. 
*/ + KScopedAutoObject thread = process.GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Set the thread priority. */ + thread->SetBasePriority(priority); + return ResultSuccess(); + } + + Result GetThreadCoreMask(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) { + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the core mask. */ + R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask)); + + return ResultSuccess(); + } + + Result SetThreadCoreMask(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) { + /* Determine the core id/affinity mask. */ + if (core_id == ams::svc::IdealCoreUseProcessValue) { + core_id = GetCurrentProcess().GetIdealCoreId(); + affinity_mask = (1ul << core_id); + } else { + /* Validate the affinity mask. */ + const u64 process_core_mask = GetCurrentProcess().GetCoreMask(); + R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, svc::ResultInvalidCoreId()); + R_UNLESS(affinity_mask != 0, svc::ResultInvalidCombination()); + + /* Validate the core id. */ + if (IsValidCoreId(core_id)) { + R_UNLESS(((1ul << core_id) & affinity_mask) != 0, svc::ResultInvalidCombination()); + } else { + R_UNLESS(core_id == ams::svc::IdealCoreNoUpdate || core_id == ams::svc::IdealCoreDontCare, svc::ResultInvalidCoreId()); + } + } + + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Set the core mask. */ + R_TRY(thread->SetCoreMask(core_id, affinity_mask)); + + return ResultSuccess(); + } + + Result GetThreadId(uint64_t *out_thread_id, ams::svc::Handle thread_handle) { + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the thread's id. */ + *out_thread_id = thread->GetId(); + return ResultSuccess(); + } + + Result GetThreadContext3(KUserPointer out_context, ams::svc::Handle thread_handle) { + /* Get the thread from its handle. */ + KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject(thread_handle); + R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle()); + + /* Require the handle be to a non-current thread in the current process. */ + R_UNLESS(thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultInvalidHandle()); + R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(), svc::ResultBusy()); + + /* Get the thread context. */ + ams::svc::ThreadContext context = {}; + R_TRY(thread->GetThreadContext3(std::addressof(context))); + + /* Copy the thread context to user space. */ + R_TRY(out_context.CopyFrom(std::addressof(context))); + + return ResultSuccess(); + } + + Result GetThreadList(int32_t *out_num_threads, KUserPointer out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) { + /* Validate that the out count is valid. */ + R_UNLESS((0 <= max_out_count && max_out_count <= static_cast(std::numeric_limits::max() / sizeof(u64))), svc::ResultOutOfRange()); + + /* Validate that the pointer is in range. 
*/ + if (max_out_count > 0) { + R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(out_thread_ids.GetUnsafePointer()), max_out_count * sizeof(u64)), svc::ResultInvalidCurrentMemory()); + } + + if (debug_handle == ams::svc::InvalidHandle) { + /* If passed invalid handle, we should return the global thread list. */ + R_TRY(KThread::GetThreadList(out_num_threads, out_thread_ids, max_out_count)); + } else { + /* Get the handle table. */ + auto &handle_table = GetCurrentProcess().GetHandleTable(); + + /* Try to get as a debug object. */ + KScopedAutoObject debug = handle_table.GetObject(debug_handle); + if (debug.IsNotNull()) { + /* Get the debug object's process. */ + KScopedAutoObject process = debug->GetProcess(); + R_UNLESS(process.IsNotNull(), svc::ResultProcessTerminated()); + + /* Get the thread list. */ + R_TRY(process->GetThreadList(out_num_threads, out_thread_ids, max_out_count)); + } else { + /* Try to get as a process. */ + KScopedAutoObject process = handle_table.GetObjectWithoutPseudoHandle(debug_handle); + R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle()); + + /* Get the thread list. */ + R_TRY(process->GetThreadList(out_num_threads, out_thread_ids, max_out_count)); + } + } + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ Result CreateThread64(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) { - MESOSPHERE_PANIC("Stubbed SvcCreateThread64 was called."); + return CreateThread(out_handle, func, arg, stack_bottom, priority, core_id); } Result StartThread64(ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcStartThread64 was called."); + return StartThread(thread_handle); } void ExitThread64() { - MESOSPHERE_PANIC("Stubbed SvcExitThread64 was called."); + return ExitThread(); } void SleepThread64(int64_t ns) { - MESOSPHERE_PANIC("Stubbed SvcSleepThread64 was called."); + return SleepThread(ns); } Result GetThreadPriority64(int32_t *out_priority, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadPriority64 was called."); + return GetThreadPriority(out_priority, thread_handle); } Result SetThreadPriority64(ams::svc::Handle thread_handle, int32_t priority) { - MESOSPHERE_PANIC("Stubbed SvcSetThreadPriority64 was called."); + return SetThreadPriority(thread_handle, priority); } Result GetThreadCoreMask64(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadCoreMask64 was called."); + return GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle); } Result SetThreadCoreMask64(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) { - MESOSPHERE_PANIC("Stubbed SvcSetThreadCoreMask64 was called."); + return SetThreadCoreMask(thread_handle, core_id, affinity_mask); } Result GetThreadId64(uint64_t *out_thread_id, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadId64 was called."); - } - - Result GetDebugFutureThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, uint64_t *thread_id, ams::svc::Handle debug_handle, int64_t ns) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugFutureThreadInfo64 was called."); - } - - Result GetLastThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) { - MESOSPHERE_PANIC("Stubbed SvcGetLastThreadInfo64 was called."); + return GetThreadId(out_thread_id, 
thread_handle); } Result GetThreadContext364(KUserPointer out_context, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadContext364 was called."); + return GetThreadContext3(out_context, thread_handle); } Result GetThreadList64(int32_t *out_num_threads, KUserPointer out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadList64 was called."); + return GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle); } /* ============================= 64From32 ABI ============================= */ Result CreateThread64From32(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) { - MESOSPHERE_PANIC("Stubbed SvcCreateThread64From32 was called."); + return CreateThread(out_handle, func, arg, stack_bottom, priority, core_id); } Result StartThread64From32(ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcStartThread64From32 was called."); + return StartThread(thread_handle); } void ExitThread64From32() { - MESOSPHERE_PANIC("Stubbed SvcExitThread64From32 was called."); + return ExitThread(); } void SleepThread64From32(int64_t ns) { - MESOSPHERE_PANIC("Stubbed SvcSleepThread64From32 was called."); + return SleepThread(ns); } Result GetThreadPriority64From32(int32_t *out_priority, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadPriority64From32 was called."); + return GetThreadPriority(out_priority, thread_handle); } Result SetThreadPriority64From32(ams::svc::Handle thread_handle, int32_t priority) { - MESOSPHERE_PANIC("Stubbed SvcSetThreadPriority64From32 was called."); + return SetThreadPriority(thread_handle, priority); } Result GetThreadCoreMask64From32(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadCoreMask64From32 was called."); + return GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle); } Result SetThreadCoreMask64From32(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) { - MESOSPHERE_PANIC("Stubbed SvcSetThreadCoreMask64From32 was called."); + return SetThreadCoreMask(thread_handle, core_id, affinity_mask); } Result GetThreadId64From32(uint64_t *out_thread_id, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadId64From32 was called."); - } - - Result GetDebugFutureThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, uint64_t *thread_id, ams::svc::Handle debug_handle, int64_t ns) { - MESOSPHERE_PANIC("Stubbed SvcGetDebugFutureThreadInfo64From32 was called."); - } - - Result GetLastThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) { - MESOSPHERE_PANIC("Stubbed SvcGetLastThreadInfo64From32 was called."); + return GetThreadId(out_thread_id, thread_handle); } Result GetThreadContext364From32(KUserPointer out_context, ams::svc::Handle thread_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadContext364From32 was called."); + return GetThreadContext3(out_context, thread_handle); } Result GetThreadList64From32(int32_t *out_num_threads, KUserPointer out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) { - MESOSPHERE_PANIC("Stubbed SvcGetThreadList64From32 was called."); + return GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_thread_profiler.cpp 
b/libraries/libmesosphere/source/svc/kern_svc_thread_profiler.cpp new file mode 100644 index 000000000..fd968bd30 --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_thread_profiler.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::svc { + + /* ============================= Common ============================= */ + + namespace { + + Result GetDebugFutureThreadInfo(ams::svc::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNoThread()); + + /* Get the debug object. */ + KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject(debug_handle); + R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle()); + + /* Synchronize the current process to the desired time. */ + { + /* Get the wait object. */ + KWaitObject *wait_object = GetCurrentProcess().GetWaitObjectPointer(); + + /* Convert the timeout from nanoseconds to ticks. */ + s64 timeout; + if (ns > 0) { + u64 ticks = KHardwareTimer::GetTick(); + ticks += ams::svc::Tick(TimeSpan::FromNanoSeconds(ns)); + ticks += 2; + + timeout = ticks; + } else { + timeout = ns; + } + + /* Synchronize to the desired time. */ + R_TRY(wait_object->Synchronize(timeout)); + } + + /* Get the running thread info. */ + R_TRY(debug->GetRunningThreadInfo(out_context, out_thread_id)); + + return ResultSuccess(); + } + + Result GetLastThreadInfo(ams::svc::LastThreadContext *out_context, uintptr_t *out_tls_address, uint32_t *out_flags) { + /* Only allow invoking the svc on development hardware. */ + R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNoThread()); + + /* Get the thread info. */ + { + KScopedInterruptDisable di; + + /* Get the previous thread. */ + KThread *prev_thread = Kernel::GetScheduler().GetPreviousThread(); + R_UNLESS(prev_thread != nullptr, svc::ResultNoThread()); + + /* Verify the last thread was owned by the current process. */ + R_UNLESS(prev_thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultUnknownThread()); + + /* Clear the output flags. */ + *out_flags = 0; + + /* Get the thread's exception context. */ + GetExceptionContext(prev_thread)->GetSvcThreadContext(out_context); + + /* Get the tls address. */ + *out_tls_address = GetInteger(prev_thread->GetThreadLocalRegionAddress()); + + /* Set the syscall flag if appropriate. 
*/ + if (prev_thread->IsCallingSvc()) { + *out_flags |= ams::svc::LastThreadInfoFlag_ThreadInSystemCall; + } + } + + return ResultSuccess(); + } + + } + + /* ============================= 64 ABI ============================= */ + + Result GetDebugFutureThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) { + return GetDebugFutureThreadInfo(out_context, out_thread_id, debug_handle, ns); + } + + Result GetLastThreadInfo64(ams::svc::lp64::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) { + static_assert(sizeof(*out_tls_address) == sizeof(uintptr_t)); + return GetLastThreadInfo(out_context, reinterpret_cast(out_tls_address), out_flags); + } + + /* ============================= 64From32 ABI ============================= */ + + Result GetDebugFutureThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, uint64_t *out_thread_id, ams::svc::Handle debug_handle, int64_t ns) { + ams::svc::LastThreadContext context = {}; + R_TRY(GetDebugFutureThreadInfo(std::addressof(context), out_thread_id, debug_handle, ns)); + + *out_context = { + .fp = static_cast(context.fp), + .sp = static_cast(context.sp), + .lr = static_cast(context.lr), + .pc = static_cast(context.pc), + }; + return ResultSuccess(); + } + + Result GetLastThreadInfo64From32(ams::svc::ilp32::LastThreadContext *out_context, ams::svc::Address *out_tls_address, uint32_t *out_flags) { + static_assert(sizeof(*out_tls_address) == sizeof(uintptr_t)); + + ams::svc::LastThreadContext context = {}; + R_TRY(GetLastThreadInfo(std::addressof(context), reinterpret_cast(out_tls_address), out_flags)); + + *out_context = { + .fp = static_cast(context.fp), + .sp = static_cast(context.sp), + .lr = static_cast(context.lr), + .pc = static_cast(context.pc), + }; + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_tick.cpp b/libraries/libmesosphere/source/svc/kern_svc_tick.cpp index 991e9b5e4..b8712f1a1 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_tick.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_tick.cpp @@ -21,20 +21,22 @@ namespace ams::kern::svc { namespace { - + int64_t GetSystemTick() { + return KHardwareTimer::GetTick(); + } } /* ============================= 64 ABI ============================= */ int64_t GetSystemTick64() { - MESOSPHERE_PANIC("Stubbed SvcGetSystemTick64 was called."); + return GetSystemTick(); } /* ============================= 64From32 ABI ============================= */ int64_t GetSystemTick64From32() { - MESOSPHERE_PANIC("Stubbed SvcGetSystemTick64From32 was called."); + return GetSystemTick(); } } diff --git a/libraries/libmesosphere/source/svc/kern_svc_transfer_memory.cpp b/libraries/libmesosphere/source/svc/kern_svc_transfer_memory.cpp index 922c06ba7..ce435d44e 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_transfer_memory.cpp +++ b/libraries/libmesosphere/source/svc/kern_svc_transfer_memory.cpp @@ -21,36 +21,132 @@ namespace ams::kern::svc { namespace { + constexpr bool IsValidTransferMemoryPermission(ams::svc::MemoryPermission perm) { + switch (perm) { + case ams::svc::MemoryPermission_None: + case ams::svc::MemoryPermission_Read: + case ams::svc::MemoryPermission_ReadWrite: + return true; + default: + return false; + } + } + Result MapTransferMemory(ams::svc::Handle trmem_handle, uintptr_t address, size_t size, ams::svc::MemoryPermission map_perm) { + /* Validate the address/size. 
*/ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Validate the permission. */ + R_UNLESS(IsValidTransferMemoryPermission(map_perm), svc::ResultInvalidState()); + + /* Get the transfer memory. */ + KScopedAutoObject trmem = GetCurrentProcess().GetHandleTable().GetObject(trmem_handle); + R_UNLESS(trmem.IsNotNull(), svc::ResultInvalidHandle()); + + /* Verify that the mapping is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Transfered), svc::ResultInvalidMemoryRegion()); + + /* Map the transfer memory. */ + R_TRY(trmem->Map(address, size, map_perm)); + + /* We succeeded. */ + return ResultSuccess(); + } + + Result UnmapTransferMemory(ams::svc::Handle trmem_handle, uintptr_t address, size_t size) { + /* Validate the address/size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Get the transfer memory. */ + KScopedAutoObject trmem = GetCurrentProcess().GetHandleTable().GetObject(trmem_handle); + R_UNLESS(trmem.IsNotNull(), svc::ResultInvalidHandle()); + + /* Verify that the mapping is in range. */ + R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Transfered), svc::ResultInvalidMemoryRegion()); + + /* Unmap the transfer memory. */ + R_TRY(trmem->Unmap(address, size)); + + return ResultSuccess(); + } + + Result CreateTransferMemory(ams::svc::Handle *out, uintptr_t address, size_t size, ams::svc::MemoryPermission map_perm) { + /* Validate the size. */ + R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress()); + R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize()); + R_UNLESS(size > 0, svc::ResultInvalidSize()); + R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory()); + + /* Validate the permissions. */ + R_UNLESS(IsValidTransferMemoryPermission(map_perm), svc::ResultInvalidNewMemoryPermission()); + + /* Get the current process and handle table. */ + auto &process = GetCurrentProcess(); + auto &handle_table = process.GetHandleTable(); + + /* Reserve a new transfer memory from the process resource limit. */ + KScopedResourceReservation trmem_reservation(std::addressof(process), ams::svc::LimitableResource_TransferMemoryCountMax); + R_UNLESS(trmem_reservation.Succeeded(), svc::ResultLimitReached()); + + /* Create the transfer memory. */ + KTransferMemory *trmem = KTransferMemory::Create(); + R_UNLESS(trmem != nullptr, svc::ResultOutOfResource()); + + /* Ensure the only reference is in the handle table when we're done. */ + ON_SCOPE_EXIT { trmem->Close(); }; + + /* Ensure that the region is in range. */ + R_UNLESS(process.GetPageTable().Contains(address, size), svc::ResultInvalidCurrentMemory()); + + /* Initialize the transfer memory. */ + R_TRY(trmem->Initialize(address, size, map_perm)); + + /* Commit the reservation. */ + trmem_reservation.Commit(); + + /* Register the transfer memory. */ + R_TRY(KTransferMemory::Register(trmem)); + + /* Add the transfer memory to the handle table. 
*/ + R_TRY(handle_table.Add(out, trmem)); + + return ResultSuccess(); + } } /* ============================= 64 ABI ============================= */ - Result MapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapTransferMemory64From32 was called."); + Result MapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) { + return MapTransferMemory(trmem_handle, address, size, owner_perm); } - Result UnmapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapTransferMemory64From32 was called."); + Result UnmapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) { + return UnmapTransferMemory(trmem_handle, address, size); } Result CreateTransferMemory64(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) { - MESOSPHERE_PANIC("Stubbed SvcCreateTransferMemory64 was called."); + return CreateTransferMemory(out_handle, address, size, map_perm); } /* ============================= 64From32 ABI ============================= */ - Result MapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) { - MESOSPHERE_PANIC("Stubbed SvcMapTransferMemory64 was called."); + Result MapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission owner_perm) { + return MapTransferMemory(trmem_handle, address, size, owner_perm); } - Result UnmapTransferMemory64(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) { - MESOSPHERE_PANIC("Stubbed SvcUnmapTransferMemory64 was called."); + Result UnmapTransferMemory64From32(ams::svc::Handle trmem_handle, ams::svc::Address address, ams::svc::Size size) { + return UnmapTransferMemory(trmem_handle, address, size); } Result CreateTransferMemory64From32(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) { - MESOSPHERE_PANIC("Stubbed SvcCreateTransferMemory64From32 was called."); + return CreateTransferMemory(out_handle, address, size, map_perm); } } diff --git a/libraries/libstratosphere/Makefile b/libraries/libstratosphere/Makefile index a87f062f5..29de3a2a6 100644 --- a/libraries/libstratosphere/Makefile +++ b/libraries/libstratosphere/Makefile @@ -116,12 +116,12 @@ dist: dist-src dist-bin #--------------------------------------------------------------------------------- clean: @echo clean ... 
- @rm -fr release lib *.bz2 + @rm -fr release lib *.bz2 include/stratosphere.hpp.gch #--------------------------------------------------------------------------------- else -DEPENDS := $(OFILES:.o=.d) +DEPENDS := $(OFILES:.o=.d) $(GCH_FILES:.gch=.d) #--------------------------------------------------------------------------------- # main targets diff --git a/libraries/libstratosphere/include/stratosphere.hpp b/libraries/libstratosphere/include/stratosphere.hpp index 81f1e2371..9fffefa9f 100644 --- a/libraries/libstratosphere/include/stratosphere.hpp +++ b/libraries/libstratosphere/include/stratosphere.hpp @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -54,6 +55,7 @@ #include #include #include +#include #include #include #include diff --git a/libraries/libstratosphere/include/stratosphere/ams/ams_types.hpp b/libraries/libstratosphere/include/stratosphere/ams/ams_types.hpp index b16de724b..54b69ac4c 100644 --- a/libraries/libstratosphere/include/stratosphere/ams/ams_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/ams/ams_types.hpp @@ -73,6 +73,7 @@ namespace ams { static constexpr uintptr_t StdAbortMagicAddress = 0x8; static constexpr u64 StdAbortMagicValue = 0xA55AF00DDEADCAFEul; static constexpr u32 StdAbortErrorDesc = 0xFFE; + static constexpr u32 StackOverflowErrorDesc = 0xFFD; static constexpr u32 DataAbortErrorDesc = 0x101; static constexpr u32 Magic = util::FourCC<'A', 'F', 'E', '2'>::Code; diff --git a/libraries/libstratosphere/include/stratosphere/ams/impl/ams_system_thread_definitions.hpp b/libraries/libstratosphere/include/stratosphere/ams/impl/ams_system_thread_definitions.hpp index f76783f20..a58d28f44 100644 --- a/libraries/libstratosphere/include/stratosphere/ams/impl/ams_system_thread_definitions.hpp +++ b/libraries/libstratosphere/include/stratosphere/ams/impl/ams_system_thread_definitions.hpp @@ -57,10 +57,12 @@ namespace ams::impl { AMS_DEFINE_SYSTEM_THREAD(-1, boot, Main); /* Mitm. */ - AMS_DEFINE_SYSTEM_THREAD(-7, mitm, InitializeThread); - AMS_DEFINE_SYSTEM_THREAD(-1, mitm_sf, QueryServerProcessThread); - AMS_DEFINE_SYSTEM_THREAD(16, mitm_fs, RomFileSystemInitializeThread); - AMS_DEFINE_SYSTEM_THREAD(21, mitm, DebugThrowThread); + AMS_DEFINE_SYSTEM_THREAD(-7, mitm, InitializeThread); + AMS_DEFINE_SYSTEM_THREAD(-1, mitm_sf, QueryServerProcessThread); + AMS_DEFINE_SYSTEM_THREAD(16, mitm_fs, RomFileSystemInitializeThread); + AMS_DEFINE_SYSTEM_THREAD(21, mitm, DebugThrowThread); + AMS_DEFINE_SYSTEM_THREAD(21, mitm_sysupdater, IpcServer); + AMS_DEFINE_SYSTEM_THREAD(21, mitm_sysupdater, AsyncPrepareSdCardUpdateTask); /* boot2. */ AMS_DEFINE_SYSTEM_THREAD(20, boot2, Main); @@ -93,6 +95,7 @@ namespace ams::impl { /* ns.*/ AMS_DEFINE_SYSTEM_THREAD(21, ns, ApplicationManagerIpcSession); + AMS_DEFINE_SYSTEM_THREAD(21, nssrv, AsyncPrepareCardUpdateTask); /* settings. */ AMS_DEFINE_SYSTEM_THREAD(21, settings, Main); @@ -110,7 +113,9 @@ namespace ams::impl { AMS_DEFINE_SYSTEM_THREAD(21, pgl, ProcessControlTask); /* lm. 
*/ - AMS_DEFINE_SYSTEM_THREAD(-18, lm, IpcServer); + AMS_DEFINE_SYSTEM_THREAD(10, lm, IpcServer); + AMS_DEFINE_SYSTEM_THREAD(10, lm, Flush); + AMS_DEFINE_SYSTEM_THREAD(10, lm, HtcsConnection); #undef AMS_DEFINE_SYSTEM_THREAD diff --git a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_attachment.hpp b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_attachment.hpp index cf403c4b6..0d44e8929 100644 --- a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_attachment.hpp +++ b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_attachment.hpp @@ -19,33 +19,14 @@ namespace ams::erpt::sf { - class IAttachment : public ams::sf::IServiceObject { - protected: - enum class CommandId { - Open = 0, - Read = 1, - SetFlags = 2, - GetFlags = 3, - Close = 4, - GetSize = 5, - }; - public: - /* Actual commands. */ - virtual Result Open(const AttachmentId &attachment_id) = 0; - virtual Result Read(ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer) = 0; - virtual Result SetFlags(AttachmentFlagSet flags) = 0; - virtual Result GetFlags(ams::sf::Out out) = 0; - virtual Result Close() = 0; - virtual Result GetSize(ams::sf::Out out) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Open), - MAKE_SERVICE_COMMAND_META(Read), - MAKE_SERVICE_COMMAND_META(SetFlags), - MAKE_SERVICE_COMMAND_META(GetFlags), - MAKE_SERVICE_COMMAND_META(Close), - MAKE_SERVICE_COMMAND_META(GetSize), - }; - }; + #define AMS_ERPT_I_ATTACHMENT_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Open, (const AttachmentId &attachment_id)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, Read, (ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, SetFlags, (AttachmentFlagSet flags)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, GetFlags, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, Close, ()) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, GetSize, (ams::sf::Out out)) + + AMS_SF_DEFINE_INTERFACE(IAttachment, AMS_ERPT_I_ATTACHMENT_INTERFACE_INFO) } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_context.hpp b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_context.hpp index 85d18ef44..47cc54018 100644 --- a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_context.hpp +++ b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_context.hpp @@ -22,48 +22,20 @@ namespace ams::erpt::sf { - class IContext : public ams::sf::IServiceObject { - protected: - enum class CommandId { - SubmitContext = 0, - CreateReport = 1, - SetInitialLaunchSettingsCompletionTime = 2, - ClearInitialLaunchSettingsCompletionTime = 3, - UpdatePowerOnTime = 4, - UpdateAwakeTime = 5, - SubmitMultipleCategoryContext = 6, - UpdateApplicationLaunchTime = 7, - ClearApplicationLaunchTime = 8, - SubmitAttachment = 9, - CreateReportWithAttachments = 10, - }; - public: - /* Actual commands. 
*/ - virtual Result SubmitContext(const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer) = 0; - virtual Result CreateReport(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &meta_buffer) = 0; - virtual Result SetInitialLaunchSettingsCompletionTime(const time::SteadyClockTimePoint &time_point) = 0; - virtual Result ClearInitialLaunchSettingsCompletionTime() = 0; - virtual Result UpdatePowerOnTime() = 0; - virtual Result UpdateAwakeTime() = 0; - virtual Result SubmitMultipleCategoryContext(const MultipleCategoryContextEntry &ctx_entry, const ams::sf::InBuffer &str_buffer) = 0; - virtual Result UpdateApplicationLaunchTime() = 0; - virtual Result ClearApplicationLaunchTime() = 0; - virtual Result SubmitAttachment(ams::sf::Out out, const ams::sf::InBuffer &attachment_name, const ams::sf::InBuffer &attachment_data) = 0; - virtual Result CreateReportWithAttachments(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &attachment_ids_buffer) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(SubmitContext), - MAKE_SERVICE_COMMAND_META(CreateReport), - MAKE_SERVICE_COMMAND_META(SetInitialLaunchSettingsCompletionTime, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(ClearInitialLaunchSettingsCompletionTime, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(UpdatePowerOnTime, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(UpdateAwakeTime, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(SubmitMultipleCategoryContext, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(UpdateApplicationLaunchTime, hos::Version_6_0_0), - MAKE_SERVICE_COMMAND_META(ClearApplicationLaunchTime, hos::Version_6_0_0), - MAKE_SERVICE_COMMAND_META(SubmitAttachment, hos::Version_8_0_0), - MAKE_SERVICE_COMMAND_META(CreateReportWithAttachments, hos::Version_8_0_0), - }; - }; + #define AMS_ERPT_I_CONTEXT_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, SubmitContext, (const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, CreateReport, (ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &meta_buffer)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, SetInitialLaunchSettingsCompletionTime, (const time::SteadyClockTimePoint &time_point), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, ClearInitialLaunchSettingsCompletionTime, (), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, UpdatePowerOnTime, (), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, UpdateAwakeTime, (), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, SubmitMultipleCategoryContext, (const MultipleCategoryContextEntry &ctx_entry, const ams::sf::InBuffer &str_buffer), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, UpdateApplicationLaunchTime, (), hos::Version_6_0_0) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, ClearApplicationLaunchTime, (), hos::Version_6_0_0) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, SubmitAttachment, (ams::sf::Out out, const ams::sf::InBuffer &attachment_name, const ams::sf::InBuffer &attachment_data), hos::Version_8_0_0) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, CreateReportWithAttachments, (ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &attachment_ids_buffer), hos::Version_8_0_0) + + + 
AMS_SF_DEFINE_INTERFACE(IContext, AMS_ERPT_I_CONTEXT_INTERFACE_INFO) } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_manager.hpp b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_manager.hpp index 43329a6f5..01fe43534 100644 --- a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_manager.hpp +++ b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_manager.hpp @@ -19,33 +19,15 @@ namespace ams::erpt::sf { - class IManager : public ams::sf::IServiceObject { - protected: - enum class CommandId { - GetReportList = 0, - GetEvent = 1, - CleanupReports = 2, - DeleteReport = 3, - GetStorageUsageStatistics = 4, - GetAttachmentList = 5, - }; - public: - /* Actual commands. */ - virtual Result GetReportList(const ams::sf::OutBuffer &out_list, ReportType type_filter) = 0; - virtual Result GetEvent(ams::sf::OutCopyHandle out) = 0; - virtual Result CleanupReports() = 0; - virtual Result DeleteReport(const ReportId &report_id) = 0; - virtual Result GetStorageUsageStatistics(ams::sf::Out out) = 0; - virtual Result GetAttachmentList(const ams::sf::OutBuffer &out_buf, const ReportId &report_id) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetReportList), - MAKE_SERVICE_COMMAND_META(GetEvent), - MAKE_SERVICE_COMMAND_META(CleanupReports, hos::Version_4_0_0), - MAKE_SERVICE_COMMAND_META(DeleteReport, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(GetStorageUsageStatistics, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(GetAttachmentList, hos::Version_8_0_0), - }; - }; + #define AMS_ERPT_I_MANAGER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetReportList, (const ams::sf::OutBuffer &out_list, ReportType type_filter)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, GetEvent, (ams::sf::OutCopyHandle out)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, CleanupReports, (), hos::Version_4_0_0) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, DeleteReport, (const ReportId &report_id), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GetStorageUsageStatistics, (ams::sf::Out out), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, GetAttachmentList, (const ams::sf::OutBuffer &out_buf, const ReportId &report_id), hos::Version_8_0_0) + + + AMS_SF_DEFINE_INTERFACE(IManager, AMS_ERPT_I_MANAGER_INTERFACE_INFO) } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_report.hpp b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_report.hpp index cc053ad84..2d8171c97 100644 --- a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_report.hpp +++ b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_report.hpp @@ -19,33 +19,15 @@ namespace ams::erpt::sf { - class IReport : public ams::sf::IServiceObject { - protected: - enum class CommandId { - Open = 0, - Read = 1, - SetFlags = 2, - GetFlags = 3, - Close = 4, - GetSize = 5, - }; - public: - /* Actual commands. 
*/ - virtual Result Open(const ReportId &report_id) = 0; - virtual Result Read(ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer) = 0; - virtual Result SetFlags(ReportFlagSet flags) = 0; - virtual Result GetFlags(ams::sf::Out out) = 0; - virtual Result Close() = 0; - virtual Result GetSize(ams::sf::Out out) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Open), - MAKE_SERVICE_COMMAND_META(Read), - MAKE_SERVICE_COMMAND_META(SetFlags), - MAKE_SERVICE_COMMAND_META(GetFlags), - MAKE_SERVICE_COMMAND_META(Close), - MAKE_SERVICE_COMMAND_META(GetSize), - }; - }; + #define AMS_ERPT_I_REPORT_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Open, (const ReportId &report_id)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, Read, (ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, SetFlags, (ReportFlagSet flags)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, GetFlags, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, Close, ()) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, GetSize, (ams::sf::Out out)) + + + AMS_SF_DEFINE_INTERFACE(IReport, AMS_ERPT_I_REPORT_INTERFACE_INFO) } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_session.hpp b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_session.hpp index 739d0bde2..c61e22229 100644 --- a/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_session.hpp +++ b/libraries/libstratosphere/include/stratosphere/erpt/sf/erpt_sf_i_session.hpp @@ -22,24 +22,11 @@ namespace ams::erpt::sf { - class ISession : public ams::sf::IServiceObject { - protected: - enum class CommandId { - OpenReport = 0, - OpenManager = 1, - OpenAttachment = 2, - }; - public: - /* Actual commands. */ - virtual Result OpenReport(ams::sf::Out> out) = 0; - virtual Result OpenManager(ams::sf::Out> out) = 0; - virtual Result OpenAttachment(ams::sf::Out> out) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(OpenReport), - MAKE_SERVICE_COMMAND_META(OpenManager), - MAKE_SERVICE_COMMAND_META(OpenAttachment, hos::Version_8_0_0), - }; - }; + #define AMS_ERPT_I_SESSION_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, OpenReport, (ams::sf::Out> out)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, OpenManager, (ams::sf::Out> out)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, OpenAttachment, (ams::sf::Out> out), hos::Version_8_0_0) + + AMS_SF_DEFINE_INTERFACE(ISession, AMS_ERPT_I_SESSION_INTERFACE_INFO) } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/err.hpp b/libraries/libstratosphere/include/stratosphere/err.hpp new file mode 100644 index 000000000..c8cb979a0 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/err.hpp @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once + +#include diff --git a/libraries/libstratosphere/include/stratosphere/err/err_error_context.hpp b/libraries/libstratosphere/include/stratosphere/err/err_error_context.hpp new file mode 100644 index 000000000..df40e82b6 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/err/err_error_context.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::err { + + enum class ErrorContextType : u8 { + None = 0, + Http = 1, + FileSystem = 2, + WebMediaPlayer = 3, + LocalContentShare = 4, + }; + + struct PaddingErrorContext { + u8 padding[0x200 - 8]; + }; + + struct ErrorContext : public sf::LargeData, public sf::PrefersMapAliasTransferMode { + ErrorContextType type; + u8 reserved[7]; + + union { + PaddingErrorContext padding; + }; + }; + static_assert(sizeof(ErrorContext) == 0x200); + static_assert(util::is_pod::value); + +} diff --git a/libraries/libstratosphere/include/stratosphere/fatal.hpp b/libraries/libstratosphere/include/stratosphere/fatal.hpp index 52ad0b054..00025c562 100644 --- a/libraries/libstratosphere/include/stratosphere/fatal.hpp +++ b/libraries/libstratosphere/include/stratosphere/fatal.hpp @@ -16,4 +16,7 @@ #pragma once -#include "fatal/fatal_types.hpp" +#include + +#include +#include diff --git a/libraries/libstratosphere/include/stratosphere/fatal/impl/fatal_i_private_service.hpp b/libraries/libstratosphere/include/stratosphere/fatal/impl/fatal_i_private_service.hpp new file mode 100644 index 000000000..59e2b70bc --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fatal/impl/fatal_i_private_service.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
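/* Illustrative sketch only: err::ErrorContext above is a fixed 0x200-byte POD (a one-byte type,
 * seven reserved bytes, and a union padded to the full size), pinned down by static_asserts.
 * The standalone mirror below uses invented Example* names to show how those compile-time
 * checks lock in the layout; it is not the atmosphère definition itself. */
#include <cstdint>
#include <type_traits>

enum class ExampleErrorContextType : std::uint8_t {
    None = 0, Http = 1, FileSystem = 2,
};

struct ExamplePaddingErrorContext {
    std::uint8_t padding[0x200 - 8];
};

struct ExampleErrorContext {
    ExampleErrorContextType type;
    std::uint8_t reserved[7];
    union {
        ExamplePaddingErrorContext padding;
    };
};

/* The same kind of compile-time checks the header applies: exact size and trivial layout. */
static_assert(sizeof(ExampleErrorContext) == 0x200);
static_assert(std::is_trivial<ExampleErrorContext>::value && std::is_standard_layout<ExampleErrorContext>::value);

int main() { return 0; }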
+ */ + +#pragma once +#include +#include +#include + +namespace ams::fatal::impl { + + #define AMS_FATAL_I_PRIVATE_SERVICE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetFatalEvent, (sf::OutCopyHandle out_h)) + + AMS_SF_DEFINE_INTERFACE(IPrivateService, AMS_FATAL_I_PRIVATE_SERVICE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/fatal/impl/fatal_i_service.hpp b/libraries/libstratosphere/include/stratosphere/fatal/impl/fatal_i_service.hpp new file mode 100644 index 000000000..36922e786 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fatal/impl/fatal_i_service.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::fatal::impl { + + #define AMS_FATAL_I_SERVICE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, ThrowFatal, (Result error, const sf::ClientProcessId &client_pid)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, ThrowFatalWithPolicy, (Result error, const sf::ClientProcessId &client_pid, FatalPolicy policy)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, ThrowFatalWithCpuContext, (Result error, const sf::ClientProcessId &client_pid, FatalPolicy policy, const CpuContext &cpu_ctx)) + + AMS_SF_DEFINE_INTERFACE(IService, AMS_FATAL_I_SERVICE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs.hpp b/libraries/libstratosphere/include/stratosphere/fs.hpp index 96093da58..176588fe7 100644 --- a/libraries/libstratosphere/include/stratosphere/fs.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs.hpp @@ -16,7 +16,13 @@ #pragma once #include +#include +#include +#include #include +#include +#include +#include #include #include #include @@ -24,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -53,3 +60,4 @@ #include #include #include +#include diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_access_log.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_access_log.hpp new file mode 100644 index 000000000..5900d6de1 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_access_log.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::fs { + + enum AccessLogMode : u32 { + AccessLogMode_None = 0, + AccessLogMode_Log = 1, + AccessLogMode_SdCard = 2, + }; + + Result GetGlobalAccessLogMode(u32 *out); + Result SetGlobalAccessLogMode(u32 mode); + + void SetLocalAccessLog(bool enabled); + void SetLocalSystemAccessLogForDebug(bool enabled); + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_context.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_context.hpp new file mode 100644 index 000000000..9ce9dfc46 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_context.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::fs { + + enum class AbortSpecifier { + Default, + Abort, + Return, + }; + + using ResultHandler = AbortSpecifier (*)(Result); + + class FsContext { + private: + ResultHandler handler; + public: + constexpr explicit FsContext(ResultHandler h) : handler(h) { /* ... */ } + + constexpr void SetHandler(ResultHandler h) { this->handler = h; } + + constexpr AbortSpecifier HandleResult(Result result) const { return this->handler(result); } + }; + + void SetDefaultFsContextResultHandler(const ResultHandler handler); + + const FsContext *GetCurrentThreadFsContext(); + void SetCurrentThreadFsContext(const FsContext *context); + + class ScopedFsContext { + private: + const FsContext * const prev_context; + public: + ALWAYS_INLINE ScopedFsContext(const FsContext &ctx) : prev_context(GetCurrentThreadFsContext()) { + SetCurrentThreadFsContext(std::addressof(ctx)); + } + + ALWAYS_INLINE ~ScopedFsContext() { + SetCurrentThreadFsContext(this->prev_context); + } + }; + + class ScopedAutoAbortDisabler { + private: + const FsContext * const prev_context; + public: + ScopedAutoAbortDisabler(); + ALWAYS_INLINE ~ScopedAutoAbortDisabler() { + SetCurrentThreadFsContext(this->prev_context); + } + }; + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem.hpp index 98e9187b7..f6852db93 100644 --- a/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem.hpp @@ -53,8 +53,6 @@ namespace ams::fs { CreateOption_BigFile = ::FsCreateOption_BigFile, }; - using FileTimeStampRaw = ::FsTimeStampRaw; - struct FileHandle; struct DirectoryHandle; @@ -74,7 +72,6 @@ namespace ams::fs { Result GetTotalSpaceSize(s64 *out, const char *path); Result SetConcatenationFileAttribute(const char *path); - Result GetFileTimeStampRaw(FileTimeStampRaw *out, const char *path); Result OpenFile(FileHandle *out, std::unique_ptr &&file, int mode); diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem_for_debug.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem_for_debug.hpp new file mode 100644 index 000000000..e68dd59e2 --- /dev/null +++ 
b/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem_for_debug.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::fs { + + using FileTimeStampRaw = ::FsTimeStampRaw; + + namespace impl { + + Result GetFileTimeStampRawForDebug(FileTimeStampRaw *out, const char *path); + + } + + Result GetFileTimeStampRawForDebug(FileTimeStampRaw *out, const char *path); + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem_utils.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem_utils.hpp index 5460deade..6f915a866 100644 --- a/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem_utils.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_filesystem_utils.hpp @@ -14,8 +14,9 @@ * along with this program. If not, see . */ #pragma once -#include "fs_common.hpp" -#include "fs_filesystem.hpp" +#include +#include +#include namespace ams::fs { diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_path_tool.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_path_tool.hpp index 37f2c5b5d..9d9e145b8 100644 --- a/libraries/libstratosphere/include/stratosphere/fs/fs_path_tool.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_path_tool.hpp @@ -14,8 +14,8 @@ * along with this program. If not, see . */ #pragma once -#include "fs_common.hpp" -#include "../fssrv/fssrv_sf_path.hpp" +#include +#include namespace ams::fs { diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_path_utils.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_path_utils.hpp index 63cb917d3..59174ce20 100644 --- a/libraries/libstratosphere/include/stratosphere/fs/fs_path_utils.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_path_utils.hpp @@ -14,8 +14,8 @@ * along with this program. If not, see . */ #pragma once -#include "fs_common.hpp" -#include "../fssrv/fssrv_sf_path.hpp" +#include +#include namespace ams::fs { diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_priority.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_priority.hpp new file mode 100644 index 000000000..84e707dd2 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_priority.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
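/* Illustrative sketch only: fs_context.hpp above introduces a per-thread FsContext whose
 * ResultHandler decides whether a failing fs result should abort or be returned, with
 * ScopedFsContext and ScopedAutoAbortDisabler swapping the context RAII-style. The standalone
 * sketch below (Example* names invented here) shows the save/replace/restore pattern those
 * scoped classes rely on; it is not the real libstratosphere implementation. */
#include <cassert>

enum class ExampleAbortSpecifier { Default, Abort, Return };

struct ExampleFsContext {
    ExampleAbortSpecifier (*handler)(int result);
};

/* Stand-ins for the real thread-local context accessors. */
inline thread_local const ExampleFsContext *g_example_current_context = nullptr;
inline const ExampleFsContext *ExampleGetCurrentThreadFsContext() { return g_example_current_context; }
inline void ExampleSetCurrentThreadFsContext(const ExampleFsContext *ctx) { g_example_current_context = ctx; }

class ExampleScopedFsContext {
    private:
        const ExampleFsContext * const prev_context;
    public:
        explicit ExampleScopedFsContext(const ExampleFsContext &ctx) : prev_context(ExampleGetCurrentThreadFsContext()) {
            ExampleSetCurrentThreadFsContext(&ctx);  /* Install the new handler for this scope. */
        }
        ~ExampleScopedFsContext() {
            ExampleSetCurrentThreadFsContext(prev_context);  /* Restore whatever was active before. */
        }
};

int main() {
    const ExampleFsContext return_on_failure = { [](int) { return ExampleAbortSpecifier::Return; } };
    {
        ExampleScopedFsContext scope(return_on_failure);
        assert(ExampleGetCurrentThreadFsContext() == &return_on_failure);
    }
    assert(ExampleGetCurrentThreadFsContext() == nullptr);  /* Previous (null) context restored. */
    return 0;
}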
+ */ +#pragma once +#include + +namespace ams::fs { + + enum Priority { + Priority_Realtime = 0, + Priority_Normal = 1, + Priority_Low = 2, + }; + + enum PriorityRaw { + PriorityRaw_Realtime = 0, + PriorityRaw_Normal = 1, + PriorityRaw_Low = 2, + PriorityRaw_Background = 3, + }; + + Priority GetPriorityOnCurrentThread(); + Priority GetPriority(os::ThreadType *thread); + PriorityRaw GetPriorityRawOnCurrentThread(); + PriorityRaw GetPriorityRaw(os::ThreadType *thread); + + void SetPriorityOnCurrentThread(Priority prio); + void SetPriority(os::ThreadType *thread, Priority prio); + void SetPriorityRawOnCurrentThread(PriorityRaw prio); + void SetPriorityRaw(os::ThreadType *thread, PriorityRaw prio); + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_query_range.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_query_range.hpp index 2456f48db..35e684c9c 100644 --- a/libraries/libstratosphere/include/stratosphere/fs/fs_query_range.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_query_range.hpp @@ -14,8 +14,8 @@ * along with this program. If not, see . */ #pragma once -#include "fs_common.hpp" -#include "fs_file.hpp" +#include +#include namespace ams::fs { diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_result_config.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_result_config.hpp new file mode 100644 index 000000000..ab89bfc92 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_result_config.hpp @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::fs { + + void SetEnabledAutoAbort(bool enabled); + void SetResultHandledByApplication(bool application); + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/fs_shared_filesystem_holder.hpp b/libraries/libstratosphere/include/stratosphere/fs/fs_shared_filesystem_holder.hpp new file mode 100644 index 000000000..9a4c5931f --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/fs_shared_filesystem_holder.hpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::fs { + + class SharedFileSystemHolder : public fsa::IFileSystem, public impl::Newable { + NON_COPYABLE(SharedFileSystemHolder); + NON_MOVEABLE(SharedFileSystemHolder); + private: + std::shared_ptr fs; + public: + SharedFileSystemHolder(std::shared_ptr f) : fs(std::move(f)) { /* ... */ } + public: + virtual Result CreateFileImpl(const char *path, s64 size, int flags) override { return this->fs->CreateFile(path, size, flags); } + virtual Result DeleteFileImpl(const char *path) override { return this->fs->DeleteFile(path); } + virtual Result CreateDirectoryImpl(const char *path) override { return this->fs->CreateDirectory(path); } + virtual Result DeleteDirectoryImpl(const char *path) override { return this->fs->DeleteDirectory(path); } + virtual Result DeleteDirectoryRecursivelyImpl(const char *path) override { return this->fs->DeleteDirectoryRecursively(path); } + virtual Result RenameFileImpl(const char *old_path, const char *new_path) override { return this->fs->RenameFile(old_path, new_path); } + virtual Result RenameDirectoryImpl(const char *old_path, const char *new_path) override { return this->fs->RenameDirectory(old_path, new_path); } + virtual Result GetEntryTypeImpl(fs::DirectoryEntryType *out, const char *path) override { return this->fs->GetEntryType(out, path); } + virtual Result OpenFileImpl(std::unique_ptr *out_file, const char *path, fs::OpenMode mode) override { return this->fs->OpenFile(out_file, path, mode); } + virtual Result OpenDirectoryImpl(std::unique_ptr *out_dir, const char *path, fs::OpenDirectoryMode mode) override { return this->fs->OpenDirectory(out_dir, path, mode); } + virtual Result CommitImpl() override { return this->fs->Commit(); } + virtual Result GetFreeSpaceSizeImpl(s64 *out, const char *path) override { return this->fs->GetFreeSpaceSize(out, path); } + virtual Result GetTotalSpaceSizeImpl(s64 *out, const char *path) override { return this->fs->GetTotalSpaceSize(out, path); } + virtual Result CleanDirectoryRecursivelyImpl(const char *path) override { return this->fs->CleanDirectoryRecursively(path); } + + /* These aren't accessible as commands. */ + virtual Result CommitProvisionallyImpl(s64 counter) override { return this->fs->CommitProvisionally(counter); } + virtual Result RollbackImpl() override { return this->fs->Rollback(); } + virtual Result FlushImpl() override { return this->fs->Flush(); } + }; + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifile.hpp b/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifile.hpp index 422240110..2d262639b 100644 --- a/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifile.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifile.hpp @@ -14,10 +14,10 @@ * along with this program. If not, see . 
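/* Illustrative sketch only: SharedFileSystemHolder above adapts a std::shared_ptr-owned
 * filesystem to the unique_ptr-oriented fsa::IFileSystem interface by forwarding every call
 * and keeping the shared_ptr alive for the holder's lifetime. The Example* types below are
 * invented to show that forwarding-holder pattern in isolation. */
#include <cstdio>
#include <memory>

class ExampleIFileSystem {
    public:
        virtual ~ExampleIFileSystem() = default;
        virtual int DeleteFile(const char *path) = 0;
};

class ExampleConcreteFileSystem : public ExampleIFileSystem {
    public:
        virtual int DeleteFile(const char *path) override { std::printf("delete %s\n", path); return 0; }
};

/* Holder: implements the interface by delegating every call to the shared target. */
class ExampleSharedFileSystemHolder : public ExampleIFileSystem {
    private:
        std::shared_ptr<ExampleIFileSystem> fs;
    public:
        explicit ExampleSharedFileSystemHolder(std::shared_ptr<ExampleIFileSystem> f) : fs(std::move(f)) { /* ... */ }
        virtual int DeleteFile(const char *path) override { return this->fs->DeleteFile(path); }
};

int main() {
    auto shared_fs = std::make_shared<ExampleConcreteFileSystem>();
    /* Callers that expect unique ownership of the interface can still share ownership of shared_fs. */
    std::unique_ptr<ExampleIFileSystem> holder = std::make_unique<ExampleSharedFileSystemHolder>(shared_fs);
    return holder->DeleteFile("example.txt");
}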
*/ #pragma once -#include "../fs_common.hpp" -#include "../fs_file.hpp" -#include "../fs_filesystem.hpp" -#include "../fs_operate_range.hpp" +#include +#include +#include +#include namespace ams::fs::fsa { diff --git a/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifilesystem.hpp b/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifilesystem.hpp index f6c91d557..83b0ea4ae 100644 --- a/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifilesystem.hpp +++ b/libraries/libstratosphere/include/stratosphere/fs/fsa/fs_ifilesystem.hpp @@ -17,6 +17,7 @@ #pragma once #include "../fs_common.hpp" #include "../fs_filesystem.hpp" +#include "../fs_filesystem_for_debug.hpp" namespace ams::fs::fsa { diff --git a/libraries/libstratosphere/include/stratosphere/fs/impl/fs_access_log_impl.hpp b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_access_log_impl.hpp new file mode 100644 index 000000000..5b7788ff7 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_access_log_impl.hpp @@ -0,0 +1,206 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include +#include + +namespace ams::fs::impl { + + enum AccessLogTarget : u32 { + AccessLogTarget_None = (0 << 0), + AccessLogTarget_Application = (1 << 0), + AccessLogTarget_System = (1 << 1), + }; + + struct IdentifyAccessLogHandle { + void *handle; + public: + static constexpr IdentifyAccessLogHandle MakeHandle(void *h) { + return IdentifyAccessLogHandle{h}; + } + }; + + bool IsEnabledAccessLog(u32 target); + bool IsEnabledAccessLog(); + + bool IsEnabledHandleAccessLog(fs::FileHandle handle); + bool IsEnabledHandleAccessLog(fs::DirectoryHandle handle); + bool IsEnabledHandleAccessLog(fs::impl::IdentifyAccessLogHandle handle); + bool IsEnabledHandleAccessLog(const void *handle); + + bool IsEnabledFileSystemAccessorAccessLog(const char *mount_name); + void EnableFileSystemAccessorAccessLog(const char *mount_name); + + using AccessLogPrinterCallback = int (*)(char *buffer, size_t buffer_size); + void RegisterStartAccessLogPrinterCallback(AccessLogPrinterCallback callback); + + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, fs::FileHandle handle, const char *fmt, ...) __attribute__((format (printf, 6, 7))); + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, fs::DirectoryHandle handle, const char *fmt, ...) __attribute__((format (printf, 6, 7))); + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, fs::impl::IdentifyAccessLogHandle handle, const char *fmt, ...) __attribute__((format (printf, 6, 7))); + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...) 
__attribute__((format (printf, 6, 7))); + void OutputAccessLog(Result result, fs::Priority priority, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...) __attribute__((format (printf, 7, 8))); + void OutputAccessLog(Result result, fs::PriorityRaw priority_raw, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...) __attribute__((format (printf, 7, 8))); + + void OutputAccessLogToOnlySdCard(const char *fmt, ...) __attribute__((format (printf, 1, 2))); + + void OutputAccessLogUnlessResultSuccess(Result result, os::Tick start, os::Tick end, const char *name, fs::FileHandle handle, const char *fmt, ...) __attribute__((format (printf, 6, 7))); + void OutputAccessLogUnlessResultSuccess(Result result, os::Tick start, os::Tick end, const char *name, fs::DirectoryHandle handle, const char *fmt, ...) __attribute__((format (printf, 6, 7))); + void OutputAccessLogUnlessResultSuccess(Result result, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...) __attribute__((format (printf, 6, 7))); + + class IdString { + private: + char buffer[0x20]; + private: + const char *ToValueString(int id); + public: + template + const char *ToString(T id); + }; + + template requires (requires { T{}; }) + inline T DereferenceOutValue(T *out_value, Result result) { + if (R_SUCCEEDED(result) && out_value != nullptr) { + return *out_value; + } else { + return T{}; + } + } + +} + +/* Access log result name. */ +#define AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME __tmp_ams_fs_access_log_result +/* Access log utils. */ +#define AMS_FS_IMPL_ACCESS_LOG_DEREFERENCE_OUT_VALUE(__VALUE__) ::ams::fs::impl::DereferenceOutValue(__VALUE__, AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME) + +/* Access log components. */ +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_SIZE ", size: %" PRId64 "" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_READ_SIZE ", read_size: %zu" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_OFFSET_AND_SIZE ", offset: %" PRId64 ", size: %zu" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_THREAD_ID ", thread_id: %" PRIu64 "" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_MOUNT ", name: \"%s\"" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_ENTRY_COUNT ", entry_count: %" PRId64 "" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_ENTRY_BUFFER_COUNT ", entry_buffer_count: %" PRId64 "" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_OPEN_MODE ", open_mode: 0x%" PRIX32 "" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH ", path: \"%s\"" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH_AND_SIZE ", path: \"%s\", size: %" PRId64 "" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH_AND_OPEN_MODE ", path: \"%s\", open_mode: 0x%" PRIX32 "" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_RENAME ", path: \"%s\", new_path: \"%s\"" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_DIRECTORY_ENTRY_TYPE ", entry_type: %s" + +/* Access log formats. */ +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE "" + +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_READ_FILE(__OUT_READ_SIZE__, __OFFSET__, __SIZE__) \ + AMS_FS_IMPL_ACCESS_LOG_FORMAT_OFFSET_AND_SIZE AMS_FS_IMPL_ACCESS_LOG_FORMAT_READ_SIZE, __OFFSET__, __SIZE__, AMS_FS_IMPL_ACCESS_LOG_DEREFERENCE_OUT_VALUE(__OUT_READ_SIZE__) + +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_WRITE_FILE_WITH_NO_OPTION AMS_FS_IMPL_ACCESS_LOG_FORMAT_OFFSET_AND_SIZE +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_WRITE_FILE_WITH_FLUSH_OPTION AMS_FS_IMPL_ACCESS_LOG_FORMAT_WRITE_FILE_WITH_NO_OPTION ", write_option: Flush" +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_WRITE_FILE(__OPTION__) ((__OPTION__).HasFlushFlag() ? 
AMS_FS_IMPL_ACCESS_LOG_FORMAT_WRITE_FILE_WITH_FLUSH_OPTION : AMS_FS_IMPL_ACCESS_LOG_FORMAT_WRITE_FILE_WITH_NO_OPTION) + +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_FILE_SIZE(__OUT_SIZE__) \ + AMS_FS_IMPL_ACCESS_LOG_FORMAT_SIZE, AMS_FS_IMPL_ACCESS_LOG_DEREFERENCE_OUT_VALUE(__OUT_SIZE__) + +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_READ_DIRECTORY(__OUT_ENTRY_COUNT__, __ENTRY_BUFFER_COUNT__) \ + AMS_FS_IMPL_ACCESS_LOG_FORMAT_ENTRY_BUFFER_COUNT AMS_FS_IMPL_ACCESS_LOG_FORMAT_ENTRY_COUNT, __ENTRY_BUFFER_COUNT__, AMS_FS_IMPL_ACCESS_LOG_DEREFERENCE_OUT_VALUE(__OUT_ENTRY_COUNT__) + +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_DIRECTORY_ENTRY_COUNT(__OUT_ENTRY_COUNT__) \ + AMS_FS_IMPL_ACCESS_LOG_FORMAT_ENTRY_COUNT, AMS_FS_IMPL_ACCESS_LOG_DEREFERENCE_OUT_VALUE(__OUT_ENTRY_COUNT__) + +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_ENTRY_TYPE(__OUT_ENTRY_TYPE__, __PATH__) \ + AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH AMS_FS_IMPL_ACCESS_LOG_FORMAT_DIRECTORY_ENTRY_TYPE, __PATH__, ::ams::fs::impl::IdString().ToString(AMS_FS_IMPL_ACCESS_LOG_DEREFERENCE_OUT_VALUE(__OUT_ENTRY_TYPE__)) + +#define AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_SPACE_SIZE(__OUT_SIZE__, __NAME__) \ + AMS_FS_IMPL_ACCESS_LOG_FORMAT_MOUNT AMS_FS_IMPL_ACCESS_LOG_FORMAT_SIZE, __NAME__, AMS_FS_IMPL_ACCESS_LOG_DEREFERENCE_OUT_VALUE(__OUT_SIZE__) + +/* Access log invocation lambdas. */ +#define AMS_FS_IMPL_ACCESS_LOG_IMPL(__EXPR__, __HANDLE__, __ENABLED__, __NAME__, ...) \ + [&](const char *name) { \ + if (!(__ENABLED__)) { \ + return (__EXPR__); \ + } else { \ + const ::ams::os::Tick start = ::ams::os::GetSystemTick(); \ + const auto AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME = (__EXPR__); \ + const ::ams::os::Tick end = ::ams::os::GetSystemTick(); \ + ::ams::fs::impl::OutputAccessLog(AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME, start, end, name, __HANDLE__, __VA_ARGS__); \ + return AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME; \ + } \ + }(__NAME__) + +#define AMS_FS_IMPL_ACCESS_LOG_WITH_PRIORITY_IMPL(__EXPR__, __PRIORITY__, __HANDLE__, __ENABLED__, __NAME__, ...) \ + [&](const char *name) { \ + if (!(__ENABLED__)) { \ + return (__EXPR__); \ + } else { \ + const ::ams::os::Tick start = ::ams::os::GetSystemTick(); \ + const auto AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME = (__EXPR__); \ + const ::ams::os::Tick end = ::ams::os::GetSystemTick(); \ + ::ams::fs::impl::OutputAccessLog(AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME, __PRIORITY__, start, end, name, __HANDLE__, __VA_ARGS__); \ + return AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME; \ + } \ + }(__NAME__) + +#define AMS_FS_IMPL_ACCESS_LOG_EXPLICIT_IMPL(__RESULT__, __START__, __END__, __HANDLE__, __ENABLED__, __NAME__, ...) \ + [&](const char *name) { \ + if (!(__ENABLED__)) { \ + return __RESULT__; \ + } else { \ + const auto AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME = (__RESULT__); \ + ::ams::fs::impl::OutputAccessLog(AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME, __START__, __END__, name, __HANDLE__, __VA_ARGS__); \ + return AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME; \ + } \ + }(__NAME__) + +#define AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED_IMPL(__EXPR__, __ENABLED__, __NAME__, ...) \ + [&](const char *name) { \ + if (!(__ENABLED__)) { \ + return (__EXPR__); \ + } else { \ + const ::ams::os::Tick start = ::ams::os::GetSystemTick(); \ + const auto AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME = (__EXPR__); \ + const ::ams::os::Tick end = ::ams::os::GetSystemTick(); \ + ::ams::fs::impl::OutputAccessLogUnlessResultSuccess(AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME, start, end, name, nullptr, __VA_ARGS__); \ + return AMS_FS_IMPL_ACCESS_LOG_RESULT_NAME; \ + } \ + }(__NAME__) + + +/* Access log api. 
*/ +#define AMS_FS_IMPL_ACCESS_LOG(__EXPR__, __HANDLE__, ...) \ + AMS_FS_IMPL_ACCESS_LOG_IMPL((__EXPR__), __HANDLE__, ::ams::fs::impl::IsEnabledAccessLog() && ::ams::fs::impl::IsEnabledHandleAccessLog(__HANDLE__), AMS_CURRENT_FUNCTION_NAME, __VA_ARGS__) + +#define AMS_FS_IMPL_ACCESS_LOG_WITH_NAME(__EXPR__, __HANDLE__, __NAME__, ...) \ + AMS_FS_IMPL_ACCESS_LOG_IMPL((__EXPR__), __HANDLE__, ::ams::fs::impl::IsEnabledAccessLog() && ::ams::fs::impl::IsEnabledHandleAccessLog(__HANDLE__), __NAME__, __VA_ARGS__) + +#define AMS_FS_IMPL_ACCESS_LOG_EXPLICIT(__RESULT__, __START__, __END__, __HANDLE__, __NAME__, ...) \ + AMS_FS_IMPL_ACCESS_LOG_EXPLICIT_IMPL((__RESULT__), __START__, __END__, __HANDLE__, ::ams::fs::impl::IsEnabledAccessLog() && ::ams::fs::impl::IsEnabledHandleAccessLog(__HANDLE__), __NAME__, __VA_ARGS__) + +#define AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(__EXPR__, ...) \ + AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED_IMPL((__EXPR__), ::ams::fs::impl::IsEnabledAccessLog(), AMS_CURRENT_FUNCTION_NAME, __VA_ARGS__) + +/* Specific utilities. */ +#define AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(__EXPR__, __HANDLE__, __FILESYSTEM__, ...) \ + AMS_FS_IMPL_ACCESS_LOG_IMPL((__EXPR__), __HANDLE__, ::ams::fs::impl::IsEnabledAccessLog() && (__FILESYSTEM__)->IsEnabledAccessLog(), AMS_CURRENT_FUNCTION_NAME, __VA_ARGS__) + +#define AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM_WITH_NAME(__EXPR__, __HANDLE__, __FILESYSTEM__, __NAME__, ...) \ + AMS_FS_IMPL_ACCESS_LOG_IMPL((__EXPR__), __HANDLE__, ::ams::fs::impl::IsEnabledAccessLog() && (__FILESYSTEM__)->IsEnabledAccessLog(), __NAME__, __VA_ARGS__) + +#define AMS_FS_IMPL_ACCESS_LOG_UNMOUNT(__EXPR__, __MOUNT_NAME__, ...) \ + AMS_FS_IMPL_ACCESS_LOG_IMPL((__EXPR__), nullptr, ::ams::fs::impl::IsEnabledAccessLog() && ::ams::fs::impl::IsEnabledFileSystemAccessorAccessLog(__MOUNT_NAME__), AMS_CURRENT_FUNCTION_NAME, __VA_ARGS__) diff --git a/libraries/libstratosphere/include/stratosphere/fs/impl/fs_fs_inline_context_utils.hpp b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_fs_inline_context_utils.hpp new file mode 100644 index 000000000..eae1f0d3f --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_fs_inline_context_utils.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
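/* Illustrative sketch only: the AMS_FS_IMPL_ACCESS_LOG* macros above wrap a filesystem call in
 * an immediately-invoked lambda that records ticks before and after the call, then emits an
 * access-log line when logging is enabled for that handle. The EXAMPLE_ACCESS_LOG macro below
 * is an invented, standalone reduction of that wrap-time-and-log pattern using std::chrono
 * instead of os::Tick. */
#include <chrono>
#include <cstdio>

inline bool ExampleIsAccessLogEnabled() { return true; }

#define EXAMPLE_ACCESS_LOG(__EXPR__, __NAME__)                                                      \
    [&](const char *name) {                                                                         \
        if (!ExampleIsAccessLogEnabled()) {                                                         \
            return (__EXPR__);                                                                      \
        } else {                                                                                    \
            const auto start  = std::chrono::steady_clock::now();                                   \
            const auto result = (__EXPR__);                                                         \
            const auto end    = std::chrono::steady_clock::now();                                   \
            const auto us     = std::chrono::duration_cast<std::chrono::microseconds>(end - start); \
            std::printf("FS_ACCESS: { name: \"%s\", result: %d, duration_us: %lld }\n",             \
                        name, result, static_cast<long long>(us.count()));                          \
            return result;                                                                          \
        }                                                                                           \
    }(__NAME__)

inline int ExampleReadFile() { return 0; /* pretend the read succeeded */ }

int main() {
    /* The call site stays a single expression, as with AMS_FS_IMPL_ACCESS_LOG usage. */
    return EXAMPLE_ACCESS_LOG(ExampleReadFile(), "ReadFile");
}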
+ */ +#pragma once +#include +#include + +namespace ams::fs::impl { + + constexpr inline u8 TlsIoPriorityMask = 0x7; + constexpr inline u8 TlsIoRecursiveCallMask = 0x8; + + struct TlsIoValueForInheritance { + u8 _tls_value; + }; + + inline void SetCurrentRequestRecursive() { + os::ThreadType * const cur_thread = os::GetCurrentThread(); + sf::SetFsInlineContext(cur_thread, TlsIoRecursiveCallMask | sf::GetFsInlineContext(cur_thread)); + } + + inline bool IsCurrentRequestRecursive() { + return (sf::GetFsInlineContext(os::GetCurrentThread()) & TlsIoRecursiveCallMask) != 0; + } + + inline TlsIoValueForInheritance GetTlsIoValueForInheritance() { + return TlsIoValueForInheritance { sf::GetFsInlineContext(os::GetCurrentThread()) }; + } + + inline void SetTlsIoValueForInheritance(TlsIoValueForInheritance tls_io) { + sf::SetFsInlineContext(os::GetCurrentThread(), tls_io._tls_value); + } + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/impl/fs_priority_utils.hpp b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_priority_utils.hpp new file mode 100644 index 000000000..043483df8 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_priority_utils.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::fs::impl { + + enum TlsIoPriority : u8 { + TlsIoPriority_Normal = 0, + TlsIoPriority_Realtime = 1, + TlsIoPriority_Low = 2, + TlsIoPriority_Background = 3, + }; + + /* Ensure that TlsIo priority matches libnx priority. 
*/ + static_assert(TlsIoPriority_Normal == static_cast(::FsPriority_Normal)); + static_assert(TlsIoPriority_Realtime == static_cast(::FsPriority_Realtime)); + static_assert(TlsIoPriority_Low == static_cast(::FsPriority_Low)); + static_assert(TlsIoPriority_Background == static_cast(::FsPriority_Background)); + + constexpr inline Result ConvertFsPriorityToTlsIoPriority(u8 *out, PriorityRaw priority) { + AMS_ASSERT(out != nullptr); + + switch (priority) { + case PriorityRaw_Normal: *out = TlsIoPriority_Normal; break; + case PriorityRaw_Realtime: *out = TlsIoPriority_Realtime; break; + case PriorityRaw_Low: *out = TlsIoPriority_Low; break; + case PriorityRaw_Background: *out = TlsIoPriority_Background; break; + default: return fs::ResultInvalidArgument(); + } + + return ResultSuccess(); + } + + constexpr inline Result ConvertTlsIoPriorityToFsPriority(PriorityRaw *out, u8 tls_io) { + AMS_ASSERT(out != nullptr); + + switch (static_cast(tls_io)) { + case TlsIoPriority_Normal: *out = PriorityRaw_Normal; break; + case TlsIoPriority_Realtime: *out = PriorityRaw_Realtime; break; + case TlsIoPriority_Low: *out = PriorityRaw_Low; break; + case TlsIoPriority_Background: *out = PriorityRaw_Background; break; + default: return fs::ResultInvalidArgument(); + } + + return ResultSuccess(); + } + + inline u8 GetTlsIoPriority(os::ThreadType *thread) { + return sf::GetFsInlineContext(thread) & TlsIoPriorityMask; + } + +} diff --git a/libraries/libstratosphere/include/stratosphere/fs/impl/fs_result_utils.hpp b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_result_utils.hpp new file mode 100644 index 000000000..2107b7713 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fs/impl/fs_result_utils.hpp @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
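/* Illustrative sketch only: fs_priority_utils.hpp above converts between fs::PriorityRaw and
 * the small TlsIo priority value kept in the sf inline context, rejecting out-of-range inputs.
 * The standalone Example* converters below mirror that switch-based round trip without the
 * real Result/fs types. */
#include <cassert>
#include <cstdint>
#include <optional>

enum ExamplePriorityRaw { ExamplePriorityRaw_Realtime, ExamplePriorityRaw_Normal, ExamplePriorityRaw_Low, ExamplePriorityRaw_Background };
enum ExampleTlsIoPriority : std::uint8_t { ExampleTlsIo_Normal = 0, ExampleTlsIo_Realtime = 1, ExampleTlsIo_Low = 2, ExampleTlsIo_Background = 3 };

constexpr std::optional<std::uint8_t> ExampleConvertToTlsIo(ExamplePriorityRaw priority) {
    switch (priority) {
        case ExamplePriorityRaw_Normal:     return ExampleTlsIo_Normal;
        case ExamplePriorityRaw_Realtime:   return ExampleTlsIo_Realtime;
        case ExamplePriorityRaw_Low:        return ExampleTlsIo_Low;
        case ExamplePriorityRaw_Background: return ExampleTlsIo_Background;
        default:                            return std::nullopt;  /* fs::ResultInvalidArgument() in the real code. */
    }
}

constexpr std::optional<ExamplePriorityRaw> ExampleConvertToPriorityRaw(std::uint8_t tls_io) {
    switch (static_cast<ExampleTlsIoPriority>(tls_io)) {
        case ExampleTlsIo_Normal:     return ExamplePriorityRaw_Normal;
        case ExampleTlsIo_Realtime:   return ExamplePriorityRaw_Realtime;
        case ExampleTlsIo_Low:        return ExamplePriorityRaw_Low;
        case ExampleTlsIo_Background: return ExamplePriorityRaw_Background;
        default:                      return std::nullopt;
    }
}

int main() {
    /* Round trip: PriorityRaw -> TlsIo -> PriorityRaw yields the original value. */
    const auto tls = ExampleConvertToTlsIo(ExamplePriorityRaw_Low);
    assert(tls && ExampleConvertToPriorityRaw(*tls) == ExamplePriorityRaw_Low);
    return 0;
}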
+ */ +#pragma once +#include + +namespace ams::fs::impl { + + bool IsAbortNeeded(Result result); + void LogErrorMessage(Result result, const char *function); + +} + +#define AMS_FS_R_CHECK_ABORT_IMPL(__RESULT__, __FORCE__) \ + ({ \ + if (::ams::fs::impl::IsAbortNeeded(__RESULT__) || (__FORCE__)) { \ + ::ams::fs::impl::LogErrorMessage(__RESULT__, AMS_CURRENT_FUNCTION_NAME); \ + R_ABORT_UNLESS(__RESULT__); \ + } \ + }) + +#define AMS_FS_R_TRY(__RESULT__) \ + ({ \ + const ::ams::Result __tmp_fs_result = (__RESULT__); \ + AMS_FS_R_CHECK_ABORT_IMPL(__tmp_fs_result, false); \ + R_TRY(__tmp_fs_result); \ + }) + +#define AMS_FS_R_ABORT_UNLESS(__RESULT__) \ + ({ \ + const ::ams::Result __tmp_fs_result = (__RESULT__); \ + AMS_FS_R_CHECK_ABORT_IMPL(__tmp_fs_result, true); \ + }) + +#define AMS_FS_ABORT_UNLESS_WITH_RESULT(__EXPR__, __RESULT__) \ + ({ \ + if (!(__EXPR__)) { \ + AMS_FS_R_ABORT_UNLESS((__RESULT__)); \ + } \ + }) + +#define AMS_FS_R_THROW(__RESULT__) \ + ({ \ + const ::ams::Result __tmp_fs_result = (__RESULT__); \ + AMS_FS_R_CHECK_ABORT_IMPL(__tmp_fs_result, false); \ + return __tmp_fs_result; \ + }) + +#define AMS_FS_R_UNLESS(__EXPR__, __RESULT__) \ + ({ \ + if (!(__EXPR__)) { \ + AMS_FS_R_THROW((__RESULT__)); \ + } \ + }) + +#define AMS_FS_R_TRY_CATCH(__EXPR__) R_TRY_CATCH(__EXPR__) + +#define AMS_FS_R_CATCH(...) R_CATCH(__VA_ARGS__) + +#define AMS_FS_R_END_TRY_CATCH \ + else if (R_FAILED(R_CURRENT_RESULT)) { \ + AMS_FS_R_THROW(R_CURRENT_RESULT); \ + } \ + } \ + }) + +#define AMS_FS_R_END_TRY_CATCH_WITH_ABORT_UNLESS \ + else { \ + AMS_FS_R_ABORT_UNLESS(R_CURRENT_RESULT); \ + } \ + } \ + }) diff --git a/libraries/libstratosphere/include/stratosphere/fssrv.hpp b/libraries/libstratosphere/include/stratosphere/fssrv.hpp index 91d839512..a2fb95b3d 100644 --- a/libraries/libstratosphere/include/stratosphere/fssrv.hpp +++ b/libraries/libstratosphere/include/stratosphere/fssrv.hpp @@ -15,7 +15,8 @@ */ #pragma once -#include +#include +#include #include #include #include diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_filesystem_interface_adapter.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_filesystem_interface_adapter.hpp index 4355176d1..a62bda661 100644 --- a/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_filesystem_interface_adapter.hpp +++ b/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_filesystem_interface_adapter.hpp @@ -14,13 +14,16 @@ * along with this program. If not, see . 
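/* Illustrative sketch only: fs_result_utils.hpp above defines AMS_FS_R_TRY / AMS_FS_R_UNLESS,
 * which log and abort when the fs auto-abort policy says a failing result must not escape, and
 * otherwise propagate it to the caller. The EXAMPLE_* macros below are an invented reduction of
 * that "check abort policy, then early-return" shape using a plain int result code. */
#include <cstdio>
#include <cstdlib>

inline bool ExampleIsAbortNeeded(int) { return false; /* policy hook; the real code consults the FsContext */ }

#define EXAMPLE_FS_R_TRY(__RESULT__)                                                    \
    do {                                                                                \
        const int example_tmp_result = (__RESULT__);                                    \
        if (example_tmp_result != 0) {                                                  \
            if (ExampleIsAbortNeeded(example_tmp_result)) {                             \
                std::fprintf(stderr, "fatal fs result %d\n", example_tmp_result);       \
                std::abort();                                                           \
            }                                                                           \
            return example_tmp_result;                                                  \
        }                                                                               \
    } while (0)

#define EXAMPLE_FS_R_UNLESS(__EXPR__, __RESULT__)                                       \
    do {                                                                                \
        if (!(__EXPR__)) {                                                              \
            EXAMPLE_FS_R_TRY(__RESULT__);                                               \
        }                                                                               \
    } while (0)

inline int ExampleOpenFile(const char *path) {
    EXAMPLE_FS_R_UNLESS(path != nullptr, 1 /* invented "invalid path" code */);
    EXAMPLE_FS_R_TRY(0 /* pretend the underlying open succeeded */);
    return 0;
}

int main() {
    return ExampleOpenFile("sdmc:/example.txt");
}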
*/ #pragma once -#include "../../fs/fs_common.hpp" -#include "../../fs/fs_file.hpp" -#include "../../fs/fs_directory.hpp" -#include "../../fs/fs_filesystem.hpp" -#include "../../fs/fs_query_range.hpp" -#include "../../fssrv/fssrv_sf_path.hpp" -#include "../../fssystem/fssystem_utility.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include namespace ams::fs::fsa { @@ -34,17 +37,8 @@ namespace ams::fssrv::impl { class FileSystemInterfaceAdapter; - class FileInterfaceAdapter final : public ams::sf::IServiceObject { + class FileInterfaceAdapter final { NON_COPYABLE(FileInterfaceAdapter); - public: - enum class CommandId { - Read = 0, - Write = 1, - Flush = 2, - SetSize = 3, - GetSize = 4, - OperateRange = 5, - }; private: std::shared_ptr parent_filesystem; std::unique_ptr base_file; @@ -62,27 +56,11 @@ namespace ams::fssrv::impl { Result SetSize(s64 size); Result GetSize(ams::sf::Out out); Result OperateRange(ams::sf::Out out, s32 op_id, s64 offset, s64 size); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - /* 1.0.0- */ - MAKE_SERVICE_COMMAND_META(Read), - MAKE_SERVICE_COMMAND_META(Write), - MAKE_SERVICE_COMMAND_META(Flush), - MAKE_SERVICE_COMMAND_META(SetSize), - MAKE_SERVICE_COMMAND_META(GetSize), - - /* 4.0.0- */ - MAKE_SERVICE_COMMAND_META(OperateRange, hos::Version_4_0_0), - }; }; + static_assert(fssrv::sf::IsIFile); - class DirectoryInterfaceAdapter final : public ams::sf::IServiceObject { + class DirectoryInterfaceAdapter final { NON_COPYABLE(DirectoryInterfaceAdapter); - public: - enum class CommandId { - Read = 0, - GetEntryCount = 1, - }; private: std::shared_ptr parent_filesystem; std::unique_ptr base_dir; @@ -94,39 +72,11 @@ namespace ams::fssrv::impl { /* Command API */ Result Read(ams::sf::Out out, const ams::sf::OutBuffer &out_entries); Result GetEntryCount(ams::sf::Out out); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Read), - MAKE_SERVICE_COMMAND_META(GetEntryCount), - }; }; + static_assert(fssrv::sf::IsIDirectory); - class FileSystemInterfaceAdapter final : public std::enable_shared_from_this, public ams::sf::IServiceObject { + class FileSystemInterfaceAdapter final : public std::enable_shared_from_this { NON_COPYABLE(FileSystemInterfaceAdapter); - public: - enum class CommandId { - /* 1.0.0+ */ - CreateFile = 0, - DeleteFile = 1, - CreateDirectory = 2, - DeleteDirectory = 3, - DeleteDirectoryRecursively = 4, - RenameFile = 5, - RenameDirectory = 6, - GetEntryType = 7, - OpenFile = 8, - OpenDirectory = 9, - Commit = 10, - GetFreeSpaceSize = 11, - GetTotalSpaceSize = 12, - - /* 3.0.0+ */ - CleanDirectoryRecursively = 13, - GetFileTimeStampRaw = 14, - - /* 4.0.0+ */ - QueryEntry = 15, - }; private: std::shared_ptr base_fs; std::unique_lock mount_count_semaphore; @@ -153,8 +103,8 @@ namespace ams::fssrv::impl { Result RenameFile(const fssrv::sf::Path &old_path, const fssrv::sf::Path &new_path); Result RenameDirectory(const fssrv::sf::Path &old_path, const fssrv::sf::Path &new_path); Result GetEntryType(ams::sf::Out out, const fssrv::sf::Path &path); - Result OpenFile(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode); - Result OpenDirectory(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode); + Result OpenFile(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode); + Result OpenDirectory(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode); Result Commit(); Result GetFreeSpaceSize(ams::sf::Out out, const fssrv::sf::Path &path); Result GetTotalSpaceSize(ams::sf::Out out, const 
fssrv::sf::Path &path); @@ -163,30 +113,6 @@ namespace ams::fssrv::impl { Result GetFileTimeStampRaw(ams::sf::Out out, const fssrv::sf::Path &path); Result QueryEntry(const ams::sf::OutBuffer &out_buf, const ams::sf::InBuffer &in_buf, s32 query_id, const fssrv::sf::Path &path); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - /* 1.0.0- */ - MAKE_SERVICE_COMMAND_META(CreateFile), - MAKE_SERVICE_COMMAND_META(DeleteFile), - MAKE_SERVICE_COMMAND_META(CreateDirectory), - MAKE_SERVICE_COMMAND_META(DeleteDirectory), - MAKE_SERVICE_COMMAND_META(DeleteDirectoryRecursively), - MAKE_SERVICE_COMMAND_META(RenameFile), - MAKE_SERVICE_COMMAND_META(RenameDirectory), - MAKE_SERVICE_COMMAND_META(GetEntryType), - MAKE_SERVICE_COMMAND_META(OpenFile), - MAKE_SERVICE_COMMAND_META(OpenDirectory), - MAKE_SERVICE_COMMAND_META(Commit), - MAKE_SERVICE_COMMAND_META(GetFreeSpaceSize), - MAKE_SERVICE_COMMAND_META(GetTotalSpaceSize), - - /* 3.0.0- */ - MAKE_SERVICE_COMMAND_META(CleanDirectoryRecursively, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetFileTimeStampRaw, hos::Version_3_0_0), - - /* 4.0.0- */ - MAKE_SERVICE_COMMAND_META(QueryEntry, hos::Version_4_0_0), - }; }; } diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_storage_interface_adapter.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_storage_interface_adapter.hpp index 6170e3f08..c1a74b42f 100644 --- a/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_storage_interface_adapter.hpp +++ b/libraries/libstratosphere/include/stratosphere/fssrv/interface_adapters/fssrv_storage_interface_adapter.hpp @@ -14,9 +14,10 @@ * along with this program. If not, see . */ #pragma once -#include "../../fs/fs_common.hpp" -#include "../../fs/fs_query_range.hpp" -#include "../../fssystem/fssystem_utility.hpp" +#include +#include +#include +#include namespace ams::fs { @@ -26,17 +27,8 @@ namespace ams::fs { namespace ams::fssrv::impl { - class StorageInterfaceAdapter final : public ams::sf::IServiceObject { + class StorageInterfaceAdapter final { NON_COPYABLE(StorageInterfaceAdapter); - public: - enum class CommandId { - Read = 0, - Write = 1, - Flush = 2, - SetSize = 3, - GetSize = 4, - OperateRange = 5, - }; private: /* TODO: Nintendo uses fssystem::AsynchronousAccessStorage here. */ std::shared_ptr base_storage; @@ -53,7 +45,7 @@ namespace ams::fssrv::impl { ~StorageInterfaceAdapter(); private: std::optional> AcquireCacheInvalidationReadLock(); - private: + public: /* Command API. 
*/ Result Read(s64 offset, const ams::sf::OutNonSecureBuffer &buffer, s64 size); Result Write(s64 offset, const ams::sf::InNonSecureBuffer &buffer, s64 size); @@ -61,18 +53,7 @@ namespace ams::fssrv::impl { Result SetSize(s64 size); Result GetSize(ams::sf::Out out); Result OperateRange(ams::sf::Out out, s32 op_id, s64 offset, s64 size); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - /* 1.0.0- */ - MAKE_SERVICE_COMMAND_META(Read), - MAKE_SERVICE_COMMAND_META(Write), - MAKE_SERVICE_COMMAND_META(Flush), - MAKE_SERVICE_COMMAND_META(SetSize), - MAKE_SERVICE_COMMAND_META(GetSize), - - /* 4.0.0- */ - MAKE_SERVICE_COMMAND_META(OperateRange, hos::Version_4_0_0), - }; }; + static_assert(fssrv::sf::IsIStorage); } diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_idirectory.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_idirectory.hpp new file mode 100644 index 000000000..422a06dde --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_idirectory.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::fssrv::sf { + + #define AMS_FSSRV_I_DIRECTORY_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Read, (ams::sf::Out out, const ams::sf::OutBuffer &out_entries)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, GetEntryCount, (ams::sf::Out out)) + + AMS_SF_DEFINE_INTERFACE(IDirectory, AMS_FSSRV_I_DIRECTORY_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_ifile.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_ifile.hpp new file mode 100644 index 000000000..03d30b29a --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_ifile.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
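/* Illustrative sketch only: the interface adapters above no longer inherit from
 * ams::sf::IServiceObject; instead the headers assert fssrv::sf::IsIFile<...>,
 * IsIDirectory<...>, and IsIStorage<...>, i.e. a compile-time predicate checks that each
 * adapter provides every command of the macro-defined interface. The C++20 concept below
 * (Example* names invented here) shows that duck-typed check on a reduced two-method
 * "directory" interface. */
#include <concepts>
#include <cstdint>

template<typename T>
concept ExampleIsIDirectory = requires (T &t, std::int64_t *out) {
    { t.Read(out) }          -> std::same_as<int>;
    { t.GetEntryCount(out) } -> std::same_as<int>;
};

class ExampleDirectoryAdapter {
    public:
        int Read(std::int64_t *out) { *out = 0; return 0; }
        int GetEntryCount(std::int64_t *out) { *out = 0; return 0; }
};

/* Fails to compile if the adapter drifts out of sync with the interface definition. */
static_assert(ExampleIsIDirectory<ExampleDirectoryAdapter>);

int main() { return 0; }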
+ */ +#pragma once +#include +#include +#include +#include + +namespace ams::fssrv::sf { + + #define AMS_FSSRV_I_FILE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Read, (ams::sf::Out out, s64 offset, const ams::sf::OutNonSecureBuffer &buffer, s64 size, ams::fs::ReadOption option)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, Write, (s64 offset, const ams::sf::InNonSecureBuffer &buffer, s64 size, ams::fs::WriteOption option)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, Flush, ()) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, SetSize, (s64 size)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GetSize, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, OperateRange, (ams::sf::Out out, s32 op_id, s64 offset, s64 size), hos::Version_4_0_0) \ + + AMS_SF_DEFINE_INTERFACE(IFile, AMS_FSSRV_I_FILE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_ifilesystem.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_ifilesystem.hpp new file mode 100644 index 000000000..90fa215ee --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_ifilesystem.hpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +namespace ams::fssrv::sf { + + #define AMS_FSSRV_I_FILESYSTEM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, CreateFile, (const ams::fssrv::sf::Path &path, s64 size, s32 option)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, DeleteFile, (const ams::fssrv::sf::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, CreateDirectory, (const ams::fssrv::sf::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, DeleteDirectory, (const ams::fssrv::sf::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, DeleteDirectoryRecursively, (const ams::fssrv::sf::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, RenameFile, (const ams::fssrv::sf::Path &old_path, const ams::fssrv::sf::Path &new_path)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, RenameDirectory, (const ams::fssrv::sf::Path &old_path, const ams::fssrv::sf::Path &new_path)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, GetEntryType, (ams::sf::Out out, const ams::fssrv::sf::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, OpenFile, (ams::sf::Out> out, const ams::fssrv::sf::Path &path, u32 mode)) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, OpenDirectory, (ams::sf::Out> out, const ams::fssrv::sf::Path &path, u32 mode)) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, Commit, ()) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, GetFreeSpaceSize, (ams::sf::Out out, const ams::fssrv::sf::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, GetTotalSpaceSize, (ams::sf::Out out, const ams::fssrv::sf::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, CleanDirectoryRecursively, (const ams::fssrv::sf::Path &path), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 14, Result, GetFileTimeStampRaw, (ams::sf::Out out, const ams::fssrv::sf::Path &path), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 15, Result, QueryEntry, (const ams::sf::OutBuffer &out_buf, const ams::sf::InBuffer &in_buf, s32 query_id, const ams::fssrv::sf::Path &path), hos::Version_4_0_0) + + AMS_SF_DEFINE_INTERFACE(IFileSystem, AMS_FSSRV_I_FILESYSTEM_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_istorage.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_istorage.hpp new file mode 100644 index 000000000..5f8cb761f --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_istorage.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include + +namespace ams::fssrv::sf { + + #define AMS_FSSRV_I_STORAGE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Read, (s64 offset, const ams::sf::OutNonSecureBuffer &buffer, s64 size)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, Write, (s64 offset, const ams::sf::InNonSecureBuffer &buffer, s64 size)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, Flush, ()) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, SetSize, (s64 size)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GetSize, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, OperateRange, (ams::sf::Out out, s32 op_id, s64 offset, s64 size), hos::Version_4_0_0) + + AMS_SF_DEFINE_INTERFACE(IStorage, AMS_FSSRV_I_STORAGE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/fssrv/fssrv_sf_path.hpp b/libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_path.hpp similarity index 100% rename from libraries/libstratosphere/include/stratosphere/fssrv/fssrv_sf_path.hpp rename to libraries/libstratosphere/include/stratosphere/fssrv/sf/fssrv_sf_path.hpp diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp index db1e01bce..e68998605 100644 --- a/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp +++ b/libraries/libstratosphere/include/stratosphere/fssystem/buffers/fssystem_buffer_manager_utils.hpp @@ -37,8 +37,7 @@ namespace ams::fssystem::buffers { } R_TRY(on_failure()); - /* TODO: os::SleepThread */ - svc::SleepThread(impl::RetryWait.GetNanoSeconds()); + os::SleepThread(impl::RetryWait); continue; } diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp index 0fdde75a2..0631e8e67 100644 --- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp +++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage.hpp @@ -133,7 +133,7 @@ namespace ams::fssystem { virtual Result GetSize(s64 *out) override { AMS_ASSERT(out != nullptr); - *out = this->table.GetSize(); + *out = this->table.GetEnd(); return ResultSuccess(); } diff --git a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp index 01ddcc8b5..44dab4f6b 100644 --- a/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp +++ b/libraries/libstratosphere/include/stratosphere/fssystem/fssystem_indirect_storage_template_impl.hpp @@ -64,10 +64,25 @@ namespace ams::fssystem { /* Process a base storage entry. */ if (cr_info.CanDo()) { + /* Ensure that we can process. */ R_UNLESS(cur_entry.storage_index == 0, fs::ResultInvalidIndirectEntryStorageIndex()); - const auto data_offset = cur_offset - cur_entry_offset; - R_TRY(func(std::addressof(this->data_storage[0]), cur_entry.GetPhysicalOffset() + data_offset, cur_offset, static_cast(cr_info.GetReadSize()))); + /* Get the current data storage's size. */ + s64 cur_data_storage_size; + R_TRY(this->data_storage[0].GetSize(std::addressof(cur_data_storage_size))); + + /* Ensure that we remain within range. 
*/ + const auto data_offset = cur_offset - cur_entry_offset; + const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset(); + const auto cur_size = static_cast(cr_info.GetReadSize()); + R_UNLESS(0 <= cur_entry_phys_offset && cur_entry_phys_offset <= cur_data_storage_size, fs::ResultInvalidIndirectEntryOffset()); + R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size, fs::ResultInvalidIndirectStorageSize()); + + /* Operate. */ + R_TRY(func(std::addressof(this->data_storage[0]), cur_entry_phys_offset + data_offset, cur_offset, cur_size)); + + /* Mark as done. */ + cr_info.Done(); } } @@ -101,7 +116,16 @@ namespace ams::fssystem { } if (needs_operate) { - R_TRY(func(std::addressof(this->data_storage[cur_entry.storage_index]), cur_entry.GetPhysicalOffset() + data_offset, cur_offset, cur_size)); + /* Get the current data storage's size. */ + s64 cur_data_storage_size; + R_TRY(this->data_storage[cur_entry.storage_index].GetSize(std::addressof(cur_data_storage_size))); + + /* Ensure that we remain within range. */ + const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset(); + R_UNLESS(0 <= cur_entry_phys_offset && cur_entry_phys_offset <= cur_data_storage_size, fs::ResultIndirectStorageCorrupted()); + R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size, fs::ResultIndirectStorageCorrupted()); + + R_TRY(func(std::addressof(this->data_storage[cur_entry.storage_index]), cur_entry_phys_offset + data_offset, cur_offset, cur_size)); } cur_offset += cur_size; diff --git a/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp b/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp index b93c6e738..961fc2805 100644 --- a/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/hos/hos_types.hpp @@ -54,12 +54,13 @@ namespace ams::hos { Version_10_0_0 = ::ams::TargetFirmware_10_0_0, Version_10_0_1 = ::ams::TargetFirmware_10_0_1, Version_10_0_2 = ::ams::TargetFirmware_10_0_2, + Version_10_0_3 = ::ams::TargetFirmware_10_0_3, + Version_10_0_4 = ::ams::TargetFirmware_10_0_4, + Version_10_1_0 = ::ams::TargetFirmware_10_1_0, Version_Current = ::ams::TargetFirmware_Current, Version_Max = ::ams::TargetFirmware_Max, }; - - } diff --git a/libraries/libstratosphere/include/stratosphere/ldr.hpp b/libraries/libstratosphere/include/stratosphere/ldr.hpp index c278de699..20f40cbdd 100644 --- a/libraries/libstratosphere/include/stratosphere/ldr.hpp +++ b/libraries/libstratosphere/include/stratosphere/ldr.hpp @@ -19,3 +19,6 @@ #include #include #include +#include +#include +#include \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_debug_monitor_interface.hpp b/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_debug_monitor_interface.hpp new file mode 100644 index 000000000..328ae400c --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_debug_monitor_interface.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::ldr::impl { + + #define AMS_LDR_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, SetProgramArguments, (ncm::ProgramId program_id, const sf::InPointerBuffer &args, u32 args_size)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, FlushArguments, ()) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, GetProcessModuleInfo, (sf::Out count, const sf::OutPointerArray &out, os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 65000, void, AtmosphereHasLaunchedProgram, (sf::Out out, ncm::ProgramId program_id)) + + AMS_SF_DEFINE_INTERFACE(IDebugMonitorInterface, AMS_LDR_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_process_manager_interface.hpp b/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_process_manager_interface.hpp new file mode 100644 index 000000000..946ef636c --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_process_manager_interface.hpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include + +namespace ams::ldr::impl { + + #define AMS_LDR_I_PROCESS_MANAGER_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, CreateProcess, (sf::OutMoveHandle proc_h, PinId id, u32 flags, sf::CopyHandle reslimit_h)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, GetProgramInfo, (sf::Out out_program_info, const ncm::ProgramLocation &loc)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, PinProgram, (sf::Out out_id, const ncm::ProgramLocation &loc)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, UnpinProgram, (PinId id)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, SetEnabledProgramVerification, (bool enabled), hos::Version_10_0_0) \ + AMS_SF_METHOD_INFO(C, H, 65000, void, AtmosphereHasLaunchedProgram, (sf::Out out, ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 65001, Result, AtmosphereGetProgramInfo, (sf::Out out_program_info, sf::Out out_status, const ncm::ProgramLocation &loc)) \ + AMS_SF_METHOD_INFO(C, H, 65002, Result, AtmospherePinProgram, (sf::Out out_id, const ncm::ProgramLocation &loc, const cfg::OverrideStatus &override_status)) + + AMS_SF_DEFINE_INTERFACE(IProcessManagerInterface, AMS_LDR_I_PROCESS_MANAGER_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_shell_interface.hpp b/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_shell_interface.hpp new file mode 100644 index 000000000..5e3e31188 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/ldr/impl/ldr_shell_interface.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include + +namespace ams::ldr::impl { + + #define AMS_LDR_I_SHELL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, SetProgramArguments, (ncm::ProgramId program_id, const sf::InPointerBuffer &args, u32 args_size)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, FlushArguments, ()) \ + AMS_SF_METHOD_INFO(C, H, 65000, Result, AtmosphereRegisterExternalCode, (sf::OutMoveHandle out, ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 65001, void, AtmosphereUnregisterExternalCode, (ncm::ProgramId program_id)) + + AMS_SF_DEFINE_INTERFACE(IShellInterface, AMS_LDR_I_SHELL_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/lr/lr_i_add_on_content_location_resolver.hpp b/libraries/libstratosphere/include/stratosphere/lr/lr_i_add_on_content_location_resolver.hpp index 70a8a58b8..64495307a 100644 --- a/libraries/libstratosphere/include/stratosphere/lr/lr_i_add_on_content_location_resolver.hpp +++ b/libraries/libstratosphere/include/stratosphere/lr/lr_i_add_on_content_location_resolver.hpp @@ -19,34 +19,14 @@ namespace ams::lr { - class IAddOnContentLocationResolver : public sf::IServiceObject { - protected: - enum class CommandId { - ResolveAddOnContentPath = 0, - RegisterAddOnContentStorageDeprecated = 1, - RegisterAddOnContentStorage = 1, - UnregisterAllAddOnContentPath = 2, - RefreshApplicationAddOnContent = 3, - UnregisterApplicationAddOnContent = 4, - }; - public: - /* Actual commands. */ - virtual Result ResolveAddOnContentPath(sf::Out out, ncm::DataId id) = 0; - virtual Result RegisterAddOnContentStorageDeprecated(ncm::DataId id, ncm::StorageId storage_id) = 0; - virtual Result RegisterAddOnContentStorage(ncm::DataId id, ncm::ApplicationId application_id, ncm::StorageId storage_id) = 0; - virtual Result UnregisterAllAddOnContentPath() = 0; - virtual Result RefreshApplicationAddOnContent(const sf::InArray &ids) = 0; - virtual Result UnregisterApplicationAddOnContent(ncm::ApplicationId id) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(ResolveAddOnContentPath, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(RegisterAddOnContentStorageDeprecated, hos::Version_2_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RegisterAddOnContentStorage, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(UnregisterAllAddOnContentPath, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(RefreshApplicationAddOnContent, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(UnregisterApplicationAddOnContent, hos::Version_9_0_0), - }; - }; + #define AMS_LR_I_ADD_ON_CONTENT_LOCATION_RESOLVER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, ResolveAddOnContentPath, (sf::Out out, ncm::DataId id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, RegisterAddOnContentStorageDeprecated, (ncm::DataId id, ncm::StorageId storage_id), hos::Version_2_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, RegisterAddOnContentStorage, (ncm::DataId id, ncm::ApplicationId application_id, ncm::StorageId storage_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, UnregisterAllAddOnContentPath, (), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, RefreshApplicationAddOnContent, (const sf::InArray &ids), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, UnregisterApplicationAddOnContent, (ncm::ApplicationId id), hos::Version_9_0_0) + AMS_SF_DEFINE_INTERFACE(IAddOnContentLocationResolver, AMS_LR_I_ADD_ON_CONTENT_LOCATION_RESOLVER_INTERFACE_INFO) } 
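Note: the interface conversions above (and the ones that follow for lr, ncm, and ns) all replace hand-written virtual classes plus dispatch tables with a single AMS_SF_METHOD_INFO table per interface, consumed by AMS_SF_DEFINE_INTERFACE. Below is a minimal, self-contained sketch of the underlying X-macro idea, illustrative only: the names (EXAMPLE_RESOLVER_MANAGER_INTERFACE_INFO, IExampleResolverManager, the EXAMPLE_DECLARE_* row macros) are invented for this sketch, and the real AMS_SF machinery additionally generates IPC marshalling metadata, firmware-version gating, and the IsI* concept checks used by the static_assert lines elsewhere in this diff.

#include <cstdint>

using Result = std::uint32_t;

/* One row per command: (row-macro, handler tag, command id, return type, name, argument list). */
#define EXAMPLE_RESOLVER_MANAGER_INTERFACE_INFO(C, H)                           \
    C(H, 0, Result, OpenLocationResolver,    (std::uint8_t storage_id))         \
    C(H, 2, Result, RefreshLocationResolver, (std::uint8_t storage_id))

/* Expansion 1: pure-virtual declarations for the interface. */
#define EXAMPLE_DECLARE_VIRTUAL(H, ID, RET, NAME, ARGS) virtual RET NAME ARGS = 0;
/* Expansion 2: a command-id enum generated from the very same table. */
#define EXAMPLE_DECLARE_CMD_ID(H, ID, RET, NAME, ARGS) CmdId_##NAME = ID,

class IExampleResolverManager {
    public:
        enum CommandId { EXAMPLE_RESOLVER_MANAGER_INTERFACE_INFO(EXAMPLE_DECLARE_CMD_ID, unused) };
        EXAMPLE_RESOLVER_MANAGER_INTERFACE_INFO(EXAMPLE_DECLARE_VIRTUAL, unused)
        virtual ~IExampleResolverManager() = default;
};

Keeping each command's id, signature, and version range in one table is what lets the same list drive both the server-side dispatch table and compile-time checks such as the IsILocationResolverManager concept assertion applied to LocationResolverManagerImpl later in this change.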
diff --git a/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver.hpp b/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver.hpp index d9d8d9daf..29dec9f59 100644 --- a/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver.hpp +++ b/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver.hpp @@ -19,97 +19,35 @@ namespace ams::lr { - class ILocationResolver : public sf::IServiceObject { - NON_COPYABLE(ILocationResolver); - NON_MOVEABLE(ILocationResolver); - protected: - enum class CommandId { - ResolveProgramPath = 0, - RedirectProgramPath = 1, - ResolveApplicationControlPath = 2, - ResolveApplicationHtmlDocumentPath = 3, - ResolveDataPath = 4, - RedirectApplicationControlPathDeprecated = 5, - RedirectApplicationControlPath = 5, - RedirectApplicationHtmlDocumentPathDeprecated = 6, - RedirectApplicationHtmlDocumentPath = 6, - ResolveApplicationLegalInformationPath = 7, - RedirectApplicationLegalInformationPathDeprecated = 8, - RedirectApplicationLegalInformationPath = 8, - Refresh = 9, - RedirectApplicationProgramPathDeprecated = 10, - RedirectApplicationProgramPath = 10, - ClearApplicationRedirectionDeprecated = 11, - ClearApplicationRedirection = 11, - EraseProgramRedirection = 12, - EraseApplicationControlRedirection = 13, - EraseApplicationHtmlDocumentRedirection = 14, - EraseApplicationLegalInformationRedirection = 15, - ResolveProgramPathForDebug = 16, - RedirectProgramPathForDebug = 17, - RedirectApplicationProgramPathForDebugDeprecated = 18, - RedirectApplicationProgramPathForDebug = 18, - EraseProgramRedirectionForDebug = 19, - }; - public: - ILocationResolver() { /* ... */ } - public: - /* Actual commands. */ - virtual Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) = 0; - virtual Result RedirectProgramPath(const Path &path, ncm::ProgramId id) = 0; - virtual Result ResolveApplicationControlPath(sf::Out out, ncm::ProgramId id) = 0; - virtual Result ResolveApplicationHtmlDocumentPath(sf::Out out, ncm::ProgramId id) = 0; - virtual Result ResolveDataPath(sf::Out out, ncm::DataId id) = 0; - virtual Result RedirectApplicationControlPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectApplicationControlPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result RedirectApplicationHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectApplicationHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result ResolveApplicationLegalInformationPath(sf::Out out, ncm::ProgramId id) = 0; - virtual Result RedirectApplicationLegalInformationPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectApplicationLegalInformationPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result Refresh() = 0; - virtual Result RedirectApplicationProgramPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectApplicationProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result ClearApplicationRedirectionDeprecated() = 0; - virtual Result ClearApplicationRedirection(const sf::InArray &excluding_ids) = 0; - virtual Result EraseProgramRedirection(ncm::ProgramId id) = 0; - virtual Result EraseApplicationControlRedirection(ncm::ProgramId id) = 0; - virtual Result EraseApplicationHtmlDocumentRedirection(ncm::ProgramId id) = 0; - virtual Result 
EraseApplicationLegalInformationRedirection(ncm::ProgramId id) = 0; - virtual Result ResolveProgramPathForDebug(sf::Out out, ncm::ProgramId id) = 0; - virtual Result RedirectProgramPathForDebug(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectApplicationProgramPathForDebugDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectApplicationProgramPathForDebug(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result EraseProgramRedirectionForDebug(ncm::ProgramId id) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(ResolveProgramPath), - MAKE_SERVICE_COMMAND_META(RedirectProgramPath), - MAKE_SERVICE_COMMAND_META(ResolveApplicationControlPath), - MAKE_SERVICE_COMMAND_META(ResolveApplicationHtmlDocumentPath), - MAKE_SERVICE_COMMAND_META(ResolveDataPath), - MAKE_SERVICE_COMMAND_META(RedirectApplicationControlPathDeprecated, hos::Version_1_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RedirectApplicationControlPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(RedirectApplicationHtmlDocumentPathDeprecated, hos::Version_1_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RedirectApplicationHtmlDocumentPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(ResolveApplicationLegalInformationPath), - MAKE_SERVICE_COMMAND_META(RedirectApplicationLegalInformationPathDeprecated, hos::Version_1_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RedirectApplicationLegalInformationPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(Refresh), - MAKE_SERVICE_COMMAND_META(RedirectApplicationProgramPathDeprecated, hos::Version_5_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RedirectApplicationProgramPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(ClearApplicationRedirectionDeprecated, hos::Version_5_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(ClearApplicationRedirection, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(EraseProgramRedirection, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(EraseApplicationControlRedirection, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(EraseApplicationHtmlDocumentRedirection, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(EraseApplicationLegalInformationRedirection, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(ResolveProgramPathForDebug, hos::Version_7_0_0), - MAKE_SERVICE_COMMAND_META(RedirectProgramPathForDebug, hos::Version_7_0_0), - MAKE_SERVICE_COMMAND_META(RedirectApplicationProgramPathForDebugDeprecated, hos::Version_7_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RedirectApplicationProgramPathForDebug, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(EraseProgramRedirectionForDebug, hos::Version_7_0_0), - }; - }; + #define AMS_LR_I_LOCATION_RESOLVER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, ResolveProgramPath, (sf::Out out, ncm::ProgramId id)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, RedirectProgramPath, (const Path &path, ncm::ProgramId id)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, ResolveApplicationControlPath, (sf::Out out, ncm::ProgramId id)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, ResolveApplicationHtmlDocumentPath, (sf::Out out, ncm::ProgramId id)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, ResolveDataPath, (sf::Out out, ncm::DataId id)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, RedirectApplicationControlPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_1_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, RedirectApplicationControlPath, (const Path &path, 
ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, RedirectApplicationHtmlDocumentPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_1_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, RedirectApplicationHtmlDocumentPath, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, ResolveApplicationLegalInformationPath, (sf::Out out, ncm::ProgramId id)) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, RedirectApplicationLegalInformationPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_1_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, RedirectApplicationLegalInformationPath, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, Refresh, ()) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, RedirectApplicationProgramPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_5_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, RedirectApplicationProgramPath, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, ClearApplicationRedirectionDeprecated, (), hos::Version_5_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, ClearApplicationRedirection, (const sf::InArray &excluding_ids), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, EraseProgramRedirection, (ncm::ProgramId id), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, EraseApplicationControlRedirection, (ncm::ProgramId id), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 14, Result, EraseApplicationHtmlDocumentRedirection, (ncm::ProgramId id), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 15, Result, EraseApplicationLegalInformationRedirection, (ncm::ProgramId id), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 16, Result, ResolveProgramPathForDebug, (sf::Out out, ncm::ProgramId id), hos::Version_7_0_0) \ + AMS_SF_METHOD_INFO(C, H, 17, Result, RedirectProgramPathForDebug, (const Path &path, ncm::ProgramId id), hos::Version_7_0_0) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, RedirectApplicationProgramPathForDebugDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_7_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, RedirectApplicationProgramPathForDebug, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 19, Result, EraseProgramRedirectionForDebug, (ncm::ProgramId id), hos::Version_7_0_0) + + + AMS_SF_DEFINE_INTERFACE(ILocationResolver, AMS_LR_I_LOCATION_RESOLVER_INTERFACE_INFO) } diff --git a/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver_manager.hpp b/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver_manager.hpp index 5d9c55184..79cac08eb 100644 --- a/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver_manager.hpp +++ b/libraries/libstratosphere/include/stratosphere/lr/lr_i_location_resolver_manager.hpp @@ -22,20 +22,12 @@ namespace ams::lr { - class ILocationResolverManager : public sf::IServiceObject { - protected: - enum class CommandId { - OpenLocationResolver = 0, - OpenRegisteredLocationResolver = 1, - RefreshLocationResolver = 2, - OpenAddOnContentLocationResolver = 3, - }; - public: - /* Actual commands. 
*/ - virtual Result OpenLocationResolver(sf::Out> out, ncm::StorageId storage_id) = 0; - virtual Result OpenRegisteredLocationResolver(sf::Out> out) = 0; - virtual Result RefreshLocationResolver(ncm::StorageId storage_id) = 0; - virtual Result OpenAddOnContentLocationResolver(sf::Out> out) = 0; - }; + #define AMS_LR_I_LOCATION_RESOLVER_MANAGER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, OpenLocationResolver, (sf::Out> out, ncm::StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, OpenRegisteredLocationResolver, (sf::Out> out)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, RefreshLocationResolver, (ncm::StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, OpenAddOnContentLocationResolver, (sf::Out> out), hos::Version_2_0_0) + + AMS_SF_DEFINE_INTERFACE(ILocationResolverManager, AMS_LR_I_LOCATION_RESOLVER_MANAGER_INTERFACE_INFO) } diff --git a/libraries/libstratosphere/include/stratosphere/lr/lr_i_registered_location_resolver.hpp b/libraries/libstratosphere/include/stratosphere/lr/lr_i_registered_location_resolver.hpp index 837dfbc8b..d628c5a51 100644 --- a/libraries/libstratosphere/include/stratosphere/lr/lr_i_registered_location_resolver.hpp +++ b/libraries/libstratosphere/include/stratosphere/lr/lr_i_registered_location_resolver.hpp @@ -19,57 +19,22 @@ namespace ams::lr { - class IRegisteredLocationResolver : public sf::IServiceObject { - protected: - enum class CommandId { - ResolveProgramPath = 0, - RegisterProgramPathDeprecated = 1, - RegisterProgramPath = 1, - UnregisterProgramPath = 2, - RedirectProgramPathDeprecated = 3, - RedirectProgramPath = 3, - ResolveHtmlDocumentPath = 4, - RegisterHtmlDocumentPathDeprecated = 5, - RegisterHtmlDocumentPath = 5, - UnregisterHtmlDocumentPath = 6, - RedirectHtmlDocumentPathDeprecated = 7, - RedirectHtmlDocumentPath = 7, - Refresh = 8, - RefreshExcluding = 9, - }; - public: - /* Actual commands. 
*/ - virtual Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) = 0; - virtual Result RegisterProgramPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RegisterProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result UnregisterProgramPath(ncm::ProgramId id) = 0; - virtual Result RedirectProgramPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result ResolveHtmlDocumentPath(sf::Out out, ncm::ProgramId id) = 0; - virtual Result RegisterHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RegisterHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result UnregisterHtmlDocumentPath(ncm::ProgramId id) = 0; - virtual Result RedirectHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) = 0; - virtual Result RedirectHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) = 0; - virtual Result Refresh() = 0; - virtual Result RefreshExcluding(const sf::InArray &ids) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(ResolveProgramPath), - MAKE_SERVICE_COMMAND_META(RegisterProgramPathDeprecated, hos::Version_1_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RegisterProgramPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(UnregisterProgramPath), - MAKE_SERVICE_COMMAND_META(RedirectProgramPathDeprecated, hos::Version_1_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RedirectProgramPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(ResolveHtmlDocumentPath, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(RegisterHtmlDocumentPathDeprecated, hos::Version_2_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RegisterHtmlDocumentPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(UnregisterHtmlDocumentPath, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(RedirectHtmlDocumentPathDeprecated, hos::Version_2_0_0, hos::Version_8_1_0), - MAKE_SERVICE_COMMAND_META(RedirectHtmlDocumentPath, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(Refresh, hos::Version_7_0_0), - MAKE_SERVICE_COMMAND_META(RefreshExcluding, hos::Version_9_0_0), - }; - }; + #define AMS_LR_I_REGISTERED_LOCATION_RESOLVER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, ResolveProgramPath, (sf::Out out, ncm::ProgramId id)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, RegisterProgramPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_1_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, RegisterProgramPath, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, UnregisterProgramPath, (ncm::ProgramId id)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, RedirectProgramPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_1_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, RedirectProgramPath, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, ResolveHtmlDocumentPath, (sf::Out out, ncm::ProgramId id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, RegisterHtmlDocumentPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_2_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, RegisterHtmlDocumentPath, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), 
hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, UnregisterHtmlDocumentPath, (ncm::ProgramId id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, RedirectHtmlDocumentPathDeprecated, (const Path &path, ncm::ProgramId id), hos::Version_2_0_0, hos::Version_8_1_1) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, RedirectHtmlDocumentPath, (const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, Refresh, (), hos::Version_7_0_0) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, RefreshExcluding, (const sf::InArray &ids), hos::Version_9_0_0) + + AMS_SF_DEFINE_INTERFACE(IRegisteredLocationResolver, AMS_LR_I_REGISTERED_LOCATION_RESOLVER_INTERFACE_INFO) } diff --git a/libraries/libstratosphere/include/stratosphere/lr/lr_location_resolver_manager_impl.hpp b/libraries/libstratosphere/include/stratosphere/lr/lr_location_resolver_manager_impl.hpp index 6711cab75..25e571883 100644 --- a/libraries/libstratosphere/include/stratosphere/lr/lr_location_resolver_manager_impl.hpp +++ b/libraries/libstratosphere/include/stratosphere/lr/lr_location_resolver_manager_impl.hpp @@ -21,7 +21,7 @@ namespace ams::lr { - class LocationResolverManagerImpl final : public ILocationResolverManager { + class LocationResolverManagerImpl final { private: /* Resolver storage. */ ncm::BoundedMap, 5> location_resolvers; @@ -31,17 +31,11 @@ namespace ams::lr { os::Mutex mutex{false}; public: /* Actual commands. */ - virtual Result OpenLocationResolver(sf::Out> out, ncm::StorageId storage_id) override; - virtual Result OpenRegisteredLocationResolver(sf::Out> out) override; - virtual Result RefreshLocationResolver(ncm::StorageId storage_id) override; - virtual Result OpenAddOnContentLocationResolver(sf::Out> out) override; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(OpenLocationResolver), - MAKE_SERVICE_COMMAND_META(OpenRegisteredLocationResolver), - MAKE_SERVICE_COMMAND_META(RefreshLocationResolver), - MAKE_SERVICE_COMMAND_META(OpenAddOnContentLocationResolver, hos::Version_2_0_0), - }; + Result OpenLocationResolver(sf::Out> out, ncm::StorageId storage_id); + Result OpenRegisteredLocationResolver(sf::Out> out); + Result RefreshLocationResolver(ncm::StorageId storage_id); + Result OpenAddOnContentLocationResolver(sf::Out> out); }; + static_assert(IsILocationResolverManager); } diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_manager_impl.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_manager_impl.hpp index 244aa3ffb..ecff33061 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_manager_impl.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_manager_impl.hpp @@ -67,7 +67,7 @@ namespace ams::ncm { }; static_assert(util::is_pod::value); - class ContentManagerImpl final : public IContentManager { + class ContentManagerImpl final { private: constexpr static size_t MaxContentStorageRoots = 8; constexpr static size_t MaxContentMetaDatabaseRoots = 8; @@ -131,21 +131,22 @@ namespace ams::ncm { Result EnsureAndMountSystemSaveData(const char *mount, const SystemSaveDataInfo &info) const; public: /* Actual commands. 
*/ - virtual Result CreateContentStorage(StorageId storage_id) override; - virtual Result CreateContentMetaDatabase(StorageId storage_id) override; - virtual Result VerifyContentStorage(StorageId storage_id) override; - virtual Result VerifyContentMetaDatabase(StorageId storage_id) override; - virtual Result OpenContentStorage(sf::Out> out, StorageId storage_id) override; - virtual Result OpenContentMetaDatabase(sf::Out> out, StorageId storage_id) override; - virtual Result CloseContentStorageForcibly(StorageId storage_id) override; - virtual Result CloseContentMetaDatabaseForcibly(StorageId storage_id) override; - virtual Result CleanupContentMetaDatabase(StorageId storage_id) override; - virtual Result ActivateContentStorage(StorageId storage_id) override; - virtual Result InactivateContentStorage(StorageId storage_id) override; - virtual Result ActivateContentMetaDatabase(StorageId storage_id) override; - virtual Result InactivateContentMetaDatabase(StorageId storage_id) override; - virtual Result InvalidateRightsIdCache() override; - virtual Result GetMemoryReport(sf::Out out) override; + Result CreateContentStorage(StorageId storage_id); + Result CreateContentMetaDatabase(StorageId storage_id); + Result VerifyContentStorage(StorageId storage_id); + Result VerifyContentMetaDatabase(StorageId storage_id); + Result OpenContentStorage(sf::Out> out, StorageId storage_id); + Result OpenContentMetaDatabase(sf::Out> out, StorageId storage_id); + Result CloseContentStorageForcibly(StorageId storage_id); + Result CloseContentMetaDatabaseForcibly(StorageId storage_id); + Result CleanupContentMetaDatabase(StorageId storage_id); + Result ActivateContentStorage(StorageId storage_id); + Result InactivateContentStorage(StorageId storage_id); + Result ActivateContentMetaDatabase(StorageId storage_id); + Result InactivateContentMetaDatabase(StorageId storage_id); + Result InvalidateRightsIdCache(); + Result GetMemoryReport(sf::Out out); }; + static_assert(IsIContentManager); } diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta.hpp index 3f015e80c..9810b7658 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta.hpp @@ -45,7 +45,7 @@ namespace ams::ncm { }; } - constexpr ContentMetaKey ToKey() { + constexpr ContentMetaKey ToKey() const { return ContentMetaKey::Make(this->id, this->version, this->type); } }; diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_extended_data.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_extended_data.hpp index 2f997f174..65aab75e5 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_extended_data.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_extended_data.hpp @@ -64,7 +64,7 @@ namespace ams::ncm { }; struct SystemUpdateMetaExtendedDataHeader { - u32 unk; // Always seems to be set to 2 + u32 version; u32 firmware_variation_count; }; @@ -318,12 +318,12 @@ namespace ams::ncm { return reinterpret_cast(this->data); } - uintptr_t GetFirmwarVariationIdStartAddress() const { + uintptr_t GetFirmwareVariationIdStartAddress() const { return this->GetHeaderAddress() + sizeof(SystemUpdateMetaExtendedDataHeader); } uintptr_t GetFirmwareVariationIdAddress(size_t i) const { - return this->GetFirmwarVariationIdStartAddress() + i * sizeof(FirmwareVariationId); + return 
this->GetFirmwareVariationIdStartAddress() + i * sizeof(FirmwareVariationId); } uintptr_t GetFirmwareVariationInfoStartAddress() const { @@ -331,7 +331,7 @@ namespace ams::ncm { } uintptr_t GetFirmwareVariationInfoAddress(size_t i) const { - return this->GetFirmwarVariationIdStartAddress() + i * sizeof(FirmwareVariationInfo); + return this->GetFirmwareVariationInfoStartAddress() + i * sizeof(FirmwareVariationInfo); } uintptr_t GetContentMetaInfoStartAddress() const { diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_utils.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_utils.hpp index 72ffbfe61..572f744f7 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_utils.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_meta_utils.hpp @@ -23,8 +23,11 @@ namespace ams::ncm { - Result ReadContentMetaPath(AutoBuffer *out, const char *path); + using MountContentMetaFunction = Result (*)(const char *mount_name, const char *path); + Result ReadContentMetaPath(AutoBuffer *out, const char *path); Result ReadVariationContentMetaInfoList(s32 *out_count, std::unique_ptr *out_meta_infos, const Path &path, FirmwareVariationId firmware_variation_id); + void SetMountContentMetaFunction(MountContentMetaFunction func); + } diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_storage.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_storage.hpp index 5d4790152..8e004d0ab 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_storage.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_content_storage.hpp @@ -142,28 +142,30 @@ namespace ams::ncm { return this->interface->ReadContentIdFile(sf::OutBuffer(dst, size), content_id, offset); } - Result GetRightsId(ams::fs::RightsId *out_rights_id, PlaceHolderId placeholder_id) { - AMS_ASSERT(this->interface != nullptr); - AMS_ABORT_UNLESS(hos::GetVersion() < hos::Version_3_0_0); - return this->interface->GetRightsIdFromPlaceHolderIdDeprecated(out_rights_id, placeholder_id); - } - Result GetRightsId(ncm::RightsId *out_rights_id, PlaceHolderId placeholder_id) { AMS_ASSERT(this->interface != nullptr); - AMS_ABORT_UNLESS(hos::GetVersion() >= hos::Version_3_0_0); - return this->interface->GetRightsIdFromPlaceHolderId(out_rights_id, placeholder_id); - } - Result GetRightsId(ams::fs::RightsId *out_rights_id, ContentId content_id) { - AMS_ASSERT(this->interface != nullptr); - AMS_ABORT_UNLESS(hos::GetVersion() < hos::Version_3_0_0); - return this->interface->GetRightsIdFromContentIdDeprecated(out_rights_id, content_id); + const auto vers = hos::GetVersion(); + if (vers >= hos::Version_3_0_0) { + return this->interface->GetRightsIdFromPlaceHolderId(out_rights_id, placeholder_id); + } else { + AMS_ABORT_UNLESS(vers >= hos::Version_2_0_0); + *out_rights_id = {}; + return this->interface->GetRightsIdFromPlaceHolderIdDeprecated(std::addressof(out_rights_id->id), placeholder_id); + } } Result GetRightsId(ncm::RightsId *out_rights_id, ContentId content_id) { AMS_ASSERT(this->interface != nullptr); - AMS_ABORT_UNLESS(hos::GetVersion() >= hos::Version_3_0_0); - return this->interface->GetRightsIdFromContentId(out_rights_id, content_id); + + const auto vers = hos::GetVersion(); + if (vers >= hos::Version_3_0_0) { + return this->interface->GetRightsIdFromContentId(out_rights_id, content_id); + } else { + AMS_ABORT_UNLESS(vers >= hos::Version_2_0_0); + *out_rights_id = {}; + return 
this->interface->GetRightsIdFromContentIdDeprecated(std::addressof(out_rights_id->id), content_id); + } } Result WriteContentForDebug(ContentId content_id, s64 offset, const void *buf, size_t size) { diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_manager.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_manager.hpp index 1ddfd6aa3..c65867ab4 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_manager.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_manager.hpp @@ -20,59 +20,23 @@ namespace ams::ncm { - class IContentManager : public sf::IServiceObject { - protected: - enum class CommandId { - CreateContentStorage = 0, - CreateContentMetaDatabase = 1, - VerifyContentStorage = 2, - VerifyContentMetaDatabase = 3, - OpenContentStorage = 4, - OpenContentMetaDatabase = 5, - CloseContentStorageForcibly = 6, - CloseContentMetaDatabaseForcibly = 7, - CleanupContentMetaDatabase = 8, - ActivateContentStorage = 9, - InactivateContentStorage = 10, - ActivateContentMetaDatabase = 11, - InactivateContentMetaDatabase = 12, - InvalidateRightsIdCache = 13, - GetMemoryReport = 14, - }; - public: - virtual Result CreateContentStorage(StorageId storage_id) = 0; - virtual Result CreateContentMetaDatabase(StorageId storage_id) = 0; - virtual Result VerifyContentStorage(StorageId storage_id) = 0; - virtual Result VerifyContentMetaDatabase(StorageId storage_id) = 0; - virtual Result OpenContentStorage(sf::Out> out, StorageId storage_id) = 0; - virtual Result OpenContentMetaDatabase(sf::Out> out, StorageId storage_id) = 0; - virtual Result CloseContentStorageForcibly(StorageId storage_id) = 0; - virtual Result CloseContentMetaDatabaseForcibly(StorageId storage_id) = 0; - virtual Result CleanupContentMetaDatabase(StorageId storage_id) = 0; - virtual Result ActivateContentStorage(StorageId storage_id) = 0; - virtual Result InactivateContentStorage(StorageId storage_id) = 0; - virtual Result ActivateContentMetaDatabase(StorageId storage_id) = 0; - virtual Result InactivateContentMetaDatabase(StorageId storage_id) = 0; - virtual Result InvalidateRightsIdCache() = 0; - virtual Result GetMemoryReport(sf::Out out) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(CreateContentStorage), - MAKE_SERVICE_COMMAND_META(CreateContentMetaDatabase), - MAKE_SERVICE_COMMAND_META(VerifyContentStorage), - MAKE_SERVICE_COMMAND_META(VerifyContentMetaDatabase), - MAKE_SERVICE_COMMAND_META(OpenContentStorage), - MAKE_SERVICE_COMMAND_META(OpenContentMetaDatabase), - MAKE_SERVICE_COMMAND_META(CloseContentStorageForcibly, hos::Version_1_0_0, hos::Version_1_0_0), - MAKE_SERVICE_COMMAND_META(CloseContentMetaDatabaseForcibly, hos::Version_1_0_0, hos::Version_1_0_0), - MAKE_SERVICE_COMMAND_META(CleanupContentMetaDatabase), - MAKE_SERVICE_COMMAND_META(ActivateContentStorage, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(InactivateContentStorage, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(ActivateContentMetaDatabase, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(InactivateContentMetaDatabase, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(InvalidateRightsIdCache, hos::Version_9_0_0), - MAKE_SERVICE_COMMAND_META(GetMemoryReport, hos::Version_10_0_0), - }; - }; + #define AMS_NCM_I_CONTENT_MANAGER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, CreateContentStorage, (StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, CreateContentMetaDatabase, (StorageId storage_id)) \ + 
AMS_SF_METHOD_INFO(C, H, 2, Result, VerifyContentStorage, (StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, VerifyContentMetaDatabase, (StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, OpenContentStorage, (sf::Out> out, StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, OpenContentMetaDatabase, (sf::Out> out, StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, CloseContentStorageForcibly, (StorageId storage_id), hos::Version_1_0_0, hos::Version_1_0_0) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, CloseContentMetaDatabaseForcibly, (StorageId storage_id), hos::Version_1_0_0, hos::Version_1_0_0) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, CleanupContentMetaDatabase, (StorageId storage_id)) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, ActivateContentStorage, (StorageId storage_id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, InactivateContentStorage, (StorageId storage_id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, ActivateContentMetaDatabase, (StorageId storage_id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, InactivateContentMetaDatabase, (StorageId storage_id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, InvalidateRightsIdCache, (), hos::Version_9_0_0) \ + AMS_SF_METHOD_INFO(C, H, 14, Result, GetMemoryReport, (sf::Out out), hos::Version_10_0_0) + + AMS_SF_DEFINE_INTERFACE(IContentManager, AMS_NCM_I_CONTENT_MANAGER_INTERFACE_INFO); } diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_meta_database.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_meta_database.hpp index 98b001c21..b7d81067b 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_meta_database.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_meta_database.hpp @@ -19,84 +19,31 @@ namespace ams::ncm { - class IContentMetaDatabase : public sf::IServiceObject { - protected: - enum class CommandId { - Set = 0, - Get = 1, - Remove = 2, - GetContentIdByType = 3, - ListContentInfo = 4, - List = 5, - GetLatestContentMetaKey = 6, - ListApplication = 7, - Has = 8, - HasAll = 9, - GetSize = 10, - GetRequiredSystemVersion = 11, - GetPatchId = 12, - DisableForcibly = 13, - LookupOrphanContent = 14, - Commit = 15, - HasContent = 16, - ListContentMetaInfo = 17, - GetAttributes = 18, - GetRequiredApplicationVersion = 19, - GetContentIdByTypeAndIdOffset = 20, - GetCount = 21, - GetOwnerApplicationId = 22, - }; - public: - /* Actual commands. 
*/ - virtual Result Set(const ContentMetaKey &key, sf::InBuffer value) = 0; - virtual Result Get(sf::Out out_size, const ContentMetaKey &key, sf::OutBuffer out_value) = 0; - virtual Result Remove(const ContentMetaKey &key) = 0; - virtual Result GetContentIdByType(sf::Out out_content_id, const ContentMetaKey &key, ContentType type) = 0; - virtual Result ListContentInfo(sf::Out out_entries_written, const sf::OutArray &out_info, const ContentMetaKey &key, s32 offset) = 0; - virtual Result List(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_info, ContentMetaType meta_type, ApplicationId application_id, u64 min, u64 max, ContentInstallType install_type) = 0; - virtual Result GetLatestContentMetaKey(sf::Out out_key, u64 id) = 0; - virtual Result ListApplication(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_keys, ContentMetaType meta_type) = 0; - virtual Result Has(sf::Out out, const ContentMetaKey &key) = 0; - virtual Result HasAll(sf::Out out, const sf::InArray &keys) = 0; - virtual Result GetSize(sf::Out out_size, const ContentMetaKey &key) = 0; - virtual Result GetRequiredSystemVersion(sf::Out out_version, const ContentMetaKey &key) = 0; - virtual Result GetPatchId(sf::Out out_patch_id, const ContentMetaKey &key) = 0; - virtual Result DisableForcibly() = 0; - virtual Result LookupOrphanContent(const sf::OutArray &out_orphaned, const sf::InArray &content_ids) = 0; - virtual Result Commit() = 0; - virtual Result HasContent(sf::Out out, const ContentMetaKey &key, const ContentId &content_id) = 0; - virtual Result ListContentMetaInfo(sf::Out out_entries_written, const sf::OutArray &out_meta_info, const ContentMetaKey &key, s32 offset) = 0; - virtual Result GetAttributes(sf::Out out_attributes, const ContentMetaKey &key) = 0; - virtual Result GetRequiredApplicationVersion(sf::Out out_version, const ContentMetaKey &key) = 0; - virtual Result GetContentIdByTypeAndIdOffset(sf::Out out_content_id, const ContentMetaKey &key, ContentType type, u8 id_offset) = 0; - virtual Result GetCount(sf::Out out_count) = 0; - virtual Result GetOwnerApplicationId(sf::Out out_id, const ContentMetaKey &key) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Set), - MAKE_SERVICE_COMMAND_META(Get), - MAKE_SERVICE_COMMAND_META(Remove), - MAKE_SERVICE_COMMAND_META(GetContentIdByType), - MAKE_SERVICE_COMMAND_META(ListContentInfo), - MAKE_SERVICE_COMMAND_META(List), - MAKE_SERVICE_COMMAND_META(GetLatestContentMetaKey), - MAKE_SERVICE_COMMAND_META(ListApplication), - MAKE_SERVICE_COMMAND_META(Has), - MAKE_SERVICE_COMMAND_META(HasAll), - MAKE_SERVICE_COMMAND_META(GetSize), - MAKE_SERVICE_COMMAND_META(GetRequiredSystemVersion), - MAKE_SERVICE_COMMAND_META(GetPatchId), - MAKE_SERVICE_COMMAND_META(DisableForcibly), - MAKE_SERVICE_COMMAND_META(LookupOrphanContent), - MAKE_SERVICE_COMMAND_META(Commit), - MAKE_SERVICE_COMMAND_META(HasContent), - MAKE_SERVICE_COMMAND_META(ListContentMetaInfo), - MAKE_SERVICE_COMMAND_META(GetAttributes), - MAKE_SERVICE_COMMAND_META(GetRequiredApplicationVersion, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetContentIdByTypeAndIdOffset, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(GetCount, hos::Version_10_0_0), - MAKE_SERVICE_COMMAND_META(GetOwnerApplicationId, hos::Version_10_0_0), - }; - }; + #define AMS_NCM_I_CONTENT_META_DATABASE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Set, (const ContentMetaKey &key, sf::InBuffer value)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, Get, (sf::Out 
out_size, const ContentMetaKey &key, sf::OutBuffer out_value)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, Remove, (const ContentMetaKey &key)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, GetContentIdByType, (sf::Out out_content_id, const ContentMetaKey &key, ContentType type)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, ListContentInfo, (sf::Out out_entries_written, const sf::OutArray &out_info, const ContentMetaKey &key, s32 offset)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, List, (sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_info, ContentMetaType meta_type, ApplicationId application_id, u64 min, u64 max, ContentInstallType install_type)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, GetLatestContentMetaKey, (sf::Out out_key, u64 id)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, ListApplication, (sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_keys, ContentMetaType meta_type)) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, Has, (sf::Out out, const ContentMetaKey &key)) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, HasAll, (sf::Out out, const sf::InArray &keys)) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, GetSize, (sf::Out out_size, const ContentMetaKey &key)) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, GetRequiredSystemVersion, (sf::Out out_version, const ContentMetaKey &key)) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, GetPatchId, (sf::Out out_patch_id, const ContentMetaKey &key)) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, DisableForcibly, ()) \ + AMS_SF_METHOD_INFO(C, H, 14, Result, LookupOrphanContent, (const sf::OutArray &out_orphaned, const sf::InArray &content_ids)) \ + AMS_SF_METHOD_INFO(C, H, 15, Result, Commit, ()) \ + AMS_SF_METHOD_INFO(C, H, 16, Result, HasContent, (sf::Out out, const ContentMetaKey &key, const ContentId &content_id)) \ + AMS_SF_METHOD_INFO(C, H, 17, Result, ListContentMetaInfo, (sf::Out out_entries_written, const sf::OutArray &out_meta_info, const ContentMetaKey &key, s32 offset)) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, GetAttributes, (sf::Out out_attributes, const ContentMetaKey &key)) \ + AMS_SF_METHOD_INFO(C, H, 19, Result, GetRequiredApplicationVersion, (sf::Out out_version, const ContentMetaKey &key), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 20, Result, GetContentIdByTypeAndIdOffset, (sf::Out out_content_id, const ContentMetaKey &key, ContentType type, u8 id_offset), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 21, Result, GetCount, (sf::Out out_count), hos::Version_10_0_0) \ + AMS_SF_METHOD_INFO(C, H, 22, Result, GetOwnerApplicationId, (sf::Out out_id, const ContentMetaKey &key), hos::Version_10_0_0) + + AMS_SF_DEFINE_INTERFACE(IContentMetaDatabase, AMS_NCM_I_CONTENT_META_DATABASE_INTERFACE_INFO) } diff --git a/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_storage.hpp b/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_storage.hpp index abe070674..6ab43bb9e 100644 --- a/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_storage.hpp +++ b/libraries/libstratosphere/include/stratosphere/ncm/ncm_i_content_storage.hpp @@ -22,109 +22,38 @@ namespace ams::ncm { - class IContentStorage : public sf::IServiceObject { - NON_COPYABLE(IContentStorage); - NON_MOVEABLE(IContentStorage); - protected: - enum class CommandId { - GeneratePlaceHolderId = 0, - CreatePlaceHolder = 1, - DeletePlaceHolder = 2, - HasPlaceHolder = 3, - WritePlaceHolder = 4, - Register = 5, - Delete = 6, - Has = 7, - GetPath = 8, - GetPlaceHolderPath = 9, - CleanupAllPlaceHolder = 10, - ListPlaceHolder = 11, - 
GetContentCount = 12, - ListContentId = 13, - GetSizeFromContentId = 14, - DisableForcibly = 15, - RevertToPlaceHolder = 16, - SetPlaceHolderSize = 17, - ReadContentIdFile = 18, - GetRightsIdFromPlaceHolderIdDeprecated = 19, - GetRightsIdFromPlaceHolderId = 19, - GetRightsIdFromContentIdDeprecated = 20, - GetRightsIdFromContentId = 20, - WriteContentForDebug = 21, - GetFreeSpaceSize = 22, - GetTotalSpaceSize = 23, - FlushPlaceHolder = 24, - GetSizeFromPlaceHolderId = 25, - RepairInvalidFileAttribute = 26, - GetRightsIdFromPlaceHolderIdWithCache = 27, - }; - public: - IContentStorage() { /* ... */ } - public: - virtual Result GeneratePlaceHolderId(sf::Out out) = 0; - virtual Result CreatePlaceHolder(PlaceHolderId placeholder_id, ContentId content_id, s64 size) = 0; - virtual Result DeletePlaceHolder(PlaceHolderId placeholder_id) = 0; - virtual Result HasPlaceHolder(sf::Out out, PlaceHolderId placeholder_id) = 0; - virtual Result WritePlaceHolder(PlaceHolderId placeholder_id, s64 offset, sf::InBuffer data) = 0; - virtual Result Register(PlaceHolderId placeholder_id, ContentId content_id) = 0; - virtual Result Delete(ContentId content_id) = 0; - virtual Result Has(sf::Out out, ContentId content_id) = 0; - virtual Result GetPath(sf::Out out, ContentId content_id) = 0; - virtual Result GetPlaceHolderPath(sf::Out out, PlaceHolderId placeholder_id) = 0; - virtual Result CleanupAllPlaceHolder() = 0; - virtual Result ListPlaceHolder(sf::Out out_count, const sf::OutArray &out_buf) = 0; - virtual Result GetContentCount(sf::Out out_count) = 0; - virtual Result ListContentId(sf::Out out_count, const sf::OutArray &out_buf, s32 start_offset) = 0; - virtual Result GetSizeFromContentId(sf::Out out_size, ContentId content_id) = 0; - virtual Result DisableForcibly() = 0; - virtual Result RevertToPlaceHolder(PlaceHolderId placeholder_id, ContentId old_content_id, ContentId new_content_id) = 0; - virtual Result SetPlaceHolderSize(PlaceHolderId placeholder_id, s64 size) = 0; - virtual Result ReadContentIdFile(sf::OutBuffer buf, ContentId content_id, s64 offset) = 0; - virtual Result GetRightsIdFromPlaceHolderIdDeprecated(sf::Out out_rights_id, PlaceHolderId placeholder_id) = 0; - virtual Result GetRightsIdFromPlaceHolderId(sf::Out out_rights_id, PlaceHolderId placeholder_id) = 0; - virtual Result GetRightsIdFromContentIdDeprecated(sf::Out out_rights_id, ContentId content_id) = 0; - virtual Result GetRightsIdFromContentId(sf::Out out_rights_id, ContentId content_id) = 0; - virtual Result WriteContentForDebug(ContentId content_id, s64 offset, sf::InBuffer data) = 0; - virtual Result GetFreeSpaceSize(sf::Out out_size) = 0; - virtual Result GetTotalSpaceSize(sf::Out out_size) = 0; - virtual Result FlushPlaceHolder() = 0; - virtual Result GetSizeFromPlaceHolderId(sf::Out out, PlaceHolderId placeholder_id) = 0; - virtual Result RepairInvalidFileAttribute() = 0; - virtual Result GetRightsIdFromPlaceHolderIdWithCache(sf::Out out_rights_id, PlaceHolderId placeholder_id, ContentId cache_content_id) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GeneratePlaceHolderId), - MAKE_SERVICE_COMMAND_META(CreatePlaceHolder), - MAKE_SERVICE_COMMAND_META(DeletePlaceHolder), - MAKE_SERVICE_COMMAND_META(HasPlaceHolder), - MAKE_SERVICE_COMMAND_META(WritePlaceHolder), - MAKE_SERVICE_COMMAND_META(Register), - MAKE_SERVICE_COMMAND_META(Delete), - MAKE_SERVICE_COMMAND_META(Has), - MAKE_SERVICE_COMMAND_META(GetPath), - MAKE_SERVICE_COMMAND_META(GetPlaceHolderPath), - 
MAKE_SERVICE_COMMAND_META(CleanupAllPlaceHolder), - MAKE_SERVICE_COMMAND_META(ListPlaceHolder), - MAKE_SERVICE_COMMAND_META(GeneratePlaceHolderId), - MAKE_SERVICE_COMMAND_META(GetContentCount), - MAKE_SERVICE_COMMAND_META(ListContentId), - MAKE_SERVICE_COMMAND_META(GetSizeFromContentId), - MAKE_SERVICE_COMMAND_META(DisableForcibly), - MAKE_SERVICE_COMMAND_META(RevertToPlaceHolder, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(SetPlaceHolderSize, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(ReadContentIdFile, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetRightsIdFromPlaceHolderIdDeprecated, hos::Version_2_0_0, hos::Version_2_3_0), - MAKE_SERVICE_COMMAND_META(GetRightsIdFromPlaceHolderId, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetRightsIdFromContentIdDeprecated, hos::Version_2_0_0, hos::Version_2_3_0), - MAKE_SERVICE_COMMAND_META(GetRightsIdFromContentId, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(WriteContentForDebug, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetFreeSpaceSize, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetTotalSpaceSize, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(FlushPlaceHolder, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetSizeFromPlaceHolderId, hos::Version_4_0_0), - MAKE_SERVICE_COMMAND_META(RepairInvalidFileAttribute, hos::Version_4_0_0), - MAKE_SERVICE_COMMAND_META(GetRightsIdFromPlaceHolderIdWithCache, hos::Version_8_0_0), - }; - }; + #define AMS_NCM_I_CONTENT_STORAGE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GeneratePlaceHolderId, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, CreatePlaceHolder, (PlaceHolderId placeholder_id, ContentId content_id, s64 size)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, DeletePlaceHolder, (PlaceHolderId placeholder_id)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, HasPlaceHolder, (sf::Out out, PlaceHolderId placeholder_id)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, WritePlaceHolder, (PlaceHolderId placeholder_id, s64 offset, sf::InBuffer data)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, Register, (PlaceHolderId placeholder_id, ContentId content_id)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, Delete, (ContentId content_id)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, Has, (sf::Out out, ContentId content_id)) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, GetPath, (sf::Out out, ContentId content_id)) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, GetPlaceHolderPath, (sf::Out out, PlaceHolderId placeholder_id)) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, CleanupAllPlaceHolder, ()) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, ListPlaceHolder, (sf::Out out_count, const sf::OutArray &out_buf)) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, GetContentCount, (sf::Out out_count)) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, ListContentId, (sf::Out out_count, const sf::OutArray &out_buf, s32 start_offset)) \ + AMS_SF_METHOD_INFO(C, H, 14, Result, GetSizeFromContentId, (sf::Out out_size, ContentId content_id)) \ + AMS_SF_METHOD_INFO(C, H, 15, Result, DisableForcibly, ()) \ + AMS_SF_METHOD_INFO(C, H, 16, Result, RevertToPlaceHolder, (PlaceHolderId placeholder_id, ContentId old_content_id, ContentId new_content_id), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 17, Result, SetPlaceHolderSize, (PlaceHolderId placeholder_id, s64 size), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, ReadContentIdFile, (sf::OutBuffer buf, ContentId content_id, s64 offset), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 19, Result, GetRightsIdFromPlaceHolderIdDeprecated, (sf::Out out_rights_id, PlaceHolderId 
placeholder_id), hos::Version_2_0_0, hos::Version_2_3_0) \
+        AMS_SF_METHOD_INFO(C, H, 19, Result, GetRightsIdFromPlaceHolderId, (sf::Out out_rights_id, PlaceHolderId placeholder_id), hos::Version_3_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 20, Result, GetRightsIdFromContentIdDeprecated, (sf::Out out_rights_id, ContentId content_id), hos::Version_2_0_0, hos::Version_2_3_0) \
+        AMS_SF_METHOD_INFO(C, H, 20, Result, GetRightsIdFromContentId, (sf::Out out_rights_id, ContentId content_id), hos::Version_3_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 21, Result, WriteContentForDebug, (ContentId content_id, s64 offset, sf::InBuffer data), hos::Version_2_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 22, Result, GetFreeSpaceSize, (sf::Out out_size), hos::Version_2_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 23, Result, GetTotalSpaceSize, (sf::Out out_size), hos::Version_2_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 24, Result, FlushPlaceHolder, (), hos::Version_3_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 25, Result, GetSizeFromPlaceHolderId, (sf::Out out, PlaceHolderId placeholder_id), hos::Version_4_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 26, Result, RepairInvalidFileAttribute, (), hos::Version_4_0_0) \
+        AMS_SF_METHOD_INFO(C, H, 27, Result, GetRightsIdFromPlaceHolderIdWithCache, (sf::Out out_rights_id, PlaceHolderId placeholder_id, ContentId cache_content_id), hos::Version_8_0_0)
+
+    AMS_SF_DEFINE_INTERFACE(IContentStorage, AMS_NCM_I_CONTENT_STORAGE_INTERFACE_INFO)
 }
diff --git a/libraries/libstratosphere/include/stratosphere/ns.hpp b/libraries/libstratosphere/include/stratosphere/ns.hpp
new file mode 100644
index 000000000..8e7d2a607
--- /dev/null
+++ b/libraries/libstratosphere/include/stratosphere/ns.hpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#pragma once
+
+#include
diff --git a/libraries/libstratosphere/include/stratosphere/ns/impl/ns_i_async.hpp b/libraries/libstratosphere/include/stratosphere/ns/impl/ns_i_async.hpp
new file mode 100644
index 000000000..1d4885da6
--- /dev/null
+++ b/libraries/libstratosphere/include/stratosphere/ns/impl/ns_i_async.hpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#pragma once
+#include
+#include
+#include
+
+namespace ams::ns::impl {
+
+    #define AMS_NS_I_ASYNC_RESULT_INTERFACE_INFO(C, H) \
+        AMS_SF_METHOD_INFO(C, H, 0, Result, Get, ()) \
+        AMS_SF_METHOD_INFO(C, H, 1, Result, Cancel, ()) \
+        AMS_SF_METHOD_INFO(C, H, 2, Result, GetErrorContext, (::ams::sf::Out<::ams::err::ErrorContext> out))
+
+    AMS_SF_DEFINE_INTERFACE(IAsyncResult, AMS_NS_I_ASYNC_RESULT_INTERFACE_INFO)
+
+}
diff --git a/libraries/libstratosphere/include/stratosphere/os.hpp b/libraries/libstratosphere/include/stratosphere/os.hpp
index 4db896902..ea37812ac 100644
--- a/libraries/libstratosphere/include/stratosphere/os.hpp
+++ b/libraries/libstratosphere/include/stratosphere/os.hpp
@@ -29,12 +29,14 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
-#include
+#include
+#include
 #include
 #include
 #include
diff --git a/libraries/libstratosphere/include/stratosphere/os/os_managed_handle.hpp b/libraries/libstratosphere/include/stratosphere/os/os_managed_handle.hpp
index b5e5281d4..737c3c245 100644
--- a/libraries/libstratosphere/include/stratosphere/os/os_managed_handle.hpp
+++ b/libraries/libstratosphere/include/stratosphere/os/os_managed_handle.hpp
@@ -70,6 +70,11 @@ namespace ams::os {
                 return h;
             }

+            void Detach() {
+                const Handle h = this->Move();
+                AMS_UNUSED(h);
+            }
+
             void Reset(Handle h) {
                 ManagedHandle(h).Swap(*this);
             }
diff --git a/libraries/libstratosphere/include/stratosphere/os/os_sdk_thread_local_storage.hpp b/libraries/libstratosphere/include/stratosphere/os/os_sdk_thread_local_storage.hpp
new file mode 100644
index 000000000..d55e312fd
--- /dev/null
+++ b/libraries/libstratosphere/include/stratosphere/os/os_sdk_thread_local_storage.hpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */ + +#pragma once +#include +#include + +namespace ams::os { + + class SdkThreadLocalStorage { + NON_COPYABLE(SdkThreadLocalStorage); + NON_MOVEABLE(SdkThreadLocalStorage); + private: + TlsSlot tls_slot; + public: + SdkThreadLocalStorage() { + R_ABORT_UNLESS(os::SdkAllocateTlsSlot(std::addressof(this->tls_slot), nullptr)); + } + + explicit SdkThreadLocalStorage(TlsDestructor destructor) { + R_ABORT_UNLESS(os::SdkAllocateTlsSlot(std::addressof(this->tls_slot), destructor)); + } + + ~SdkThreadLocalStorage() { + os::FreeTlsSlot(this->tls_slot); + } + + uintptr_t GetValue() const { return os::GetTlsValue(this->tls_slot); } + void SetValue(uintptr_t value) { return os::SetTlsValue(this->tls_slot, value); } + + TlsSlot GetTlsSlot() const { return this->tls_slot; } + }; + +} diff --git a/libraries/libstratosphere/include/stratosphere/os/os_sdk_thread_local_storage_api.hpp b/libraries/libstratosphere/include/stratosphere/os/os_sdk_thread_local_storage_api.hpp new file mode 100644 index 000000000..174142204 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/os/os_sdk_thread_local_storage_api.hpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::os { + + Result SdkAllocateTlsSlot(TlsSlot *out, TlsDestructor destructor); + +} diff --git a/libraries/libstratosphere/include/stratosphere/os/os_thread_common.hpp b/libraries/libstratosphere/include/stratosphere/os/os_thread_common.hpp index 8e634dc9e..8c9373fc9 100644 --- a/libraries/libstratosphere/include/stratosphere/os/os_thread_common.hpp +++ b/libraries/libstratosphere/include/stratosphere/os/os_thread_common.hpp @@ -27,6 +27,8 @@ namespace ams::os { constexpr inline s32 DefaultThreadPriority = ThreadPriorityRangeSize / 2; constexpr inline s32 LowestThreadPriority = ThreadPriorityRangeSize - 1; + constexpr inline s32 InvalidThreadPriority = 127; + constexpr inline s32 LowestSystemThreadPriority = 35; constexpr inline s32 HighestSystemThreadPriority = -12; diff --git a/libraries/libstratosphere/include/stratosphere/os/os_thread_local_storage.hpp b/libraries/libstratosphere/include/stratosphere/os/os_thread_local_storage.hpp new file mode 100644 index 000000000..641974c68 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/os/os_thread_local_storage.hpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. 
If not, see . + */ + +#pragma once +#include +#include + +namespace ams::os { + + class ThreadLocalStorage { + NON_COPYABLE(ThreadLocalStorage); + NON_MOVEABLE(ThreadLocalStorage); + private: + TlsSlot tls_slot; + public: + ThreadLocalStorage() { + R_ABORT_UNLESS(os::AllocateTlsSlot(std::addressof(this->tls_slot), nullptr)); + } + + explicit ThreadLocalStorage(TlsDestructor destructor) { + R_ABORT_UNLESS(os::AllocateTlsSlot(std::addressof(this->tls_slot), destructor)); + } + + ~ThreadLocalStorage() { + os::FreeTlsSlot(this->tls_slot); + } + + uintptr_t GetValue() const { return os::GetTlsValue(this->tls_slot); } + void SetValue(uintptr_t value) { return os::SetTlsValue(this->tls_slot, value); } + + TlsSlot GetTlsSlot() const { return this->tls_slot; } + }; + +} diff --git a/libraries/libstratosphere/include/stratosphere/os/os_thread_types.hpp b/libraries/libstratosphere/include/stratosphere/os/os_thread_types.hpp index c687add16..08f1f6329 100644 --- a/libraries/libstratosphere/include/stratosphere/os/os_thread_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/os/os_thread_types.hpp @@ -57,6 +57,12 @@ namespace ams::os { size_t stack_size; ThreadFunction function; void *argument; + + /* NOTE: Here, Nintendo stores the TLS array. This is handled by libnx in our case. */ + /* However, we need to access certain values in other threads' TLS (Nintendo uses a hardcoded layout for SDK tls members...) */ + /* These members are tls slot holders in sdk code, but just normal thread type members under our scheme. */ + uintptr_t atomic_sf_inline_context; + mutable impl::InternalCriticalSectionStorage cs_thread; mutable impl::InternalConditionVariableStorage cv_thread; diff --git a/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory.hpp b/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory.hpp new file mode 100644 index 000000000..c06e352bc --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory.hpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::os { + + class TransferMemory { + NON_COPYABLE(TransferMemory); + NON_MOVEABLE(TransferMemory); + private: + TransferMemoryType tmem; + public: + constexpr TransferMemory() : tmem{ .state = TransferMemoryType::State_NotInitialized } { + /* ... 
*/ + } + + TransferMemory(void *address, size_t size, MemoryPermission perm) { + R_ABORT_UNLESS(CreateTransferMemory(std::addressof(this->tmem), address, size, perm)); + } + + TransferMemory(size_t size, Handle handle, bool managed) { + this->Attach(size, handle, managed); + } + + ~TransferMemory() { + if (this->tmem.state == TransferMemoryType::State_NotInitialized) { + return; + } + DestroyTransferMemory(std::addressof(this->tmem)); + } + + void Attach(size_t size, Handle handle, bool managed) { + AttachTransferMemory(std::addressof(this->tmem), size, handle, managed); + } + + Handle Detach() { + return DetachTransferMemory(std::addressof(this->tmem)); + } + + Result Map(void **out, MemoryPermission owner_perm) { + return MapTransferMemory(out, std::addressof(this->tmem), owner_perm); + } + + void Unmap() { + UnmapTransferMemory(std::addressof(this->tmem)); + } + + operator TransferMemoryType &() { + return this->tmem; + } + + operator const TransferMemoryType &() const { + return this->tmem; + } + + TransferMemoryType *GetBase() { + return std::addressof(this->tmem); + } + }; + +} diff --git a/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory_api.hpp b/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory_api.hpp new file mode 100644 index 000000000..2891903f1 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory_api.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include + +namespace ams::os { + + struct TransferMemoryType; + + Result CreateTransferMemory(TransferMemoryType *tmem, void *address, size_t size, MemoryPermission perm); + + Result AttachTransferMemory(TransferMemoryType *tmem, size_t size, Handle handle, bool managed); + Handle DetachTransferMemory(TransferMemoryType *tmem); + + void DestroyTransferMemory(TransferMemoryType *tmem); + + Result MapTransferMemory(void **out, TransferMemoryType *tmem, MemoryPermission owner_perm); + void UnmapTransferMemory(TransferMemoryType *tmem); + +} diff --git a/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory_types.hpp b/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory_types.hpp new file mode 100644 index 000000000..2b532c0b2 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/os/os_transfer_memory_types.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include + +namespace ams::os { + + struct TransferMemoryType { + enum State { + State_NotInitialized = 0, + State_Created = 1, + State_Mapped = 2, + State_Detached = 3, + }; + + u8 state; + bool handle_managed; + bool allocated; + + void *address; + size_t size; + Handle handle; + + mutable impl::InternalCriticalSectionStorage cs_transfer_memory; + }; + static_assert(std::is_trivial::value); + +} diff --git a/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_event_observer.hpp b/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_event_observer.hpp index d3e147e9c..d212ab30f 100644 --- a/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_event_observer.hpp +++ b/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_event_observer.hpp @@ -22,21 +22,10 @@ namespace ams::pgl::sf { - class IEventObserver : public ams::sf::IServiceObject { - protected: - enum class CommandId { - GetProcessEventHandle = 0, - GetProcessEventInfo = 1, - }; - public: - /* Actual commands. */ - virtual Result GetProcessEventHandle(ams::sf::OutCopyHandle out) = 0; - virtual Result GetProcessEventInfo(ams::sf::Out out) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetProcessEventHandle), - MAKE_SERVICE_COMMAND_META(GetProcessEventInfo), - }; - }; + #define AMS_PGL_I_EVENT_OBSERVER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetProcessEventHandle, (ams::sf::OutCopyHandle out)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, GetProcessEventInfo, (ams::sf::Out out)) + + AMS_SF_DEFINE_INTERFACE(IEventObserver, AMS_PGL_I_EVENT_OBSERVER_INTERFACE_INFO); } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_shell_interface.hpp b/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_shell_interface.hpp index c44d72924..9046b2ef2 100644 --- a/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_shell_interface.hpp +++ b/libraries/libstratosphere/include/stratosphere/pgl/sf/pgl_sf_i_shell_interface.hpp @@ -23,52 +23,22 @@ namespace ams::pgl::sf { - class IShellInterface : public ams::sf::IServiceObject { - protected: - enum class CommandId { - LaunchProgram = 0, - TerminateProcess = 1, - LaunchProgramFromHost = 2, - GetHostContentMetaInfo = 4, - GetApplicationProcessId = 5, - BoostSystemMemoryResourceLimit = 6, - IsProcessTracked = 7, - EnableApplicationCrashReport = 8, - IsApplicationCrashReportEnabled = 9, - EnableApplicationAllThreadDumpOnCrash = 10, - TriggerApplicationSnapShotDumper = 12, - GetShellEventObserver = 20, - }; - public: - /* Actual commands. 
*/ - virtual Result LaunchProgram(ams::sf::Out out, const ncm::ProgramLocation &loc, u32 pm_flags, u8 pgl_flags) = 0; - virtual Result TerminateProcess(os::ProcessId process_id) = 0; - virtual Result LaunchProgramFromHost(ams::sf::Out out, const ams::sf::InBuffer &content_path, u32 pm_flags) = 0; - virtual Result GetHostContentMetaInfo(ams::sf::Out out, const ams::sf::InBuffer &content_path) = 0; - virtual Result GetApplicationProcessId(ams::sf::Out out) = 0; - virtual Result BoostSystemMemoryResourceLimit(u64 size) = 0; - virtual Result IsProcessTracked(ams::sf::Out out, os::ProcessId process_id) = 0; - virtual Result EnableApplicationCrashReport(bool enabled) = 0; - virtual Result IsApplicationCrashReportEnabled(ams::sf::Out out) = 0; - virtual Result EnableApplicationAllThreadDumpOnCrash(bool enabled) = 0; - virtual Result TriggerApplicationSnapShotDumper(SnapShotDumpType dump_type, const ams::sf::InBuffer &arg) = 0; + #define AMS_PGL_I_SHELL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, LaunchProgram, (ams::sf::Out out, const ncm::ProgramLocation &loc, u32 pm_flags, u8 pgl_flags)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, TerminateProcess, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, LaunchProgramFromHost, (ams::sf::Out out, const ams::sf::InBuffer &content_path, u32 pm_flags)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GetHostContentMetaInfo, (ams::sf::Out out, const ams::sf::InBuffer &content_path)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, GetApplicationProcessId, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, BoostSystemMemoryResourceLimit, (u64 size)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, IsProcessTracked, (ams::sf::Out out, os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, EnableApplicationCrashReport, (bool enabled)) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, IsApplicationCrashReportEnabled, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, EnableApplicationAllThreadDumpOnCrash, (bool enabled)) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, TriggerApplicationSnapShotDumper, (SnapShotDumpType dump_type, const ams::sf::InBuffer &arg)) \ + AMS_SF_METHOD_INFO(C, H, 20, Result, GetShellEventObserver, (ams::sf::Out> out)) + + AMS_SF_DEFINE_INTERFACE(IShellInterface, AMS_PGL_I_SHELL_INTERFACE_INTERFACE_INFO); + - virtual Result GetShellEventObserver(ams::sf::Out> out) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(LaunchProgram), - MAKE_SERVICE_COMMAND_META(TerminateProcess), - MAKE_SERVICE_COMMAND_META(LaunchProgramFromHost), - MAKE_SERVICE_COMMAND_META(GetHostContentMetaInfo), - MAKE_SERVICE_COMMAND_META(GetApplicationProcessId), - MAKE_SERVICE_COMMAND_META(BoostSystemMemoryResourceLimit), - MAKE_SERVICE_COMMAND_META(IsProcessTracked), - MAKE_SERVICE_COMMAND_META(EnableApplicationCrashReport), - MAKE_SERVICE_COMMAND_META(IsApplicationCrashReportEnabled), - MAKE_SERVICE_COMMAND_META(EnableApplicationAllThreadDumpOnCrash), - MAKE_SERVICE_COMMAND_META(TriggerApplicationSnapShotDumper), - MAKE_SERVICE_COMMAND_META(GetShellEventObserver), - }; - }; } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/pgl/srv/pgl_srv_shell_interface.hpp b/libraries/libstratosphere/include/stratosphere/pgl/srv/pgl_srv_shell_interface.hpp index 40f74f3ba..3487c550b 100644 --- a/libraries/libstratosphere/include/stratosphere/pgl/srv/pgl_srv_shell_interface.hpp +++ b/libraries/libstratosphere/include/stratosphere/pgl/srv/pgl_srv_shell_interface.hpp @@ -20,7 +20,7 @@ 
namespace ams::pgl::srv { - class ShellInterface final : public pgl::sf::IShellInterface { + class ShellInterface final { NON_COPYABLE(ShellInterface); NON_MOVEABLE(ShellInterface); private: @@ -34,19 +34,20 @@ namespace ams::pgl::srv { } public: /* Interface commands. */ - virtual Result LaunchProgram(ams::sf::Out out, const ncm::ProgramLocation &loc, u32 pm_flags, u8 pgl_flags) override final; - virtual Result TerminateProcess(os::ProcessId process_id) override final; - virtual Result LaunchProgramFromHost(ams::sf::Out out, const ams::sf::InBuffer &content_path, u32 pm_flags) override final; - virtual Result GetHostContentMetaInfo(ams::sf::Out out, const ams::sf::InBuffer &content_path) override final; - virtual Result GetApplicationProcessId(ams::sf::Out out) override final; - virtual Result BoostSystemMemoryResourceLimit(u64 size) override final; - virtual Result IsProcessTracked(ams::sf::Out out, os::ProcessId process_id) override final; - virtual Result EnableApplicationCrashReport(bool enabled) override final; - virtual Result IsApplicationCrashReportEnabled(ams::sf::Out out) override final; - virtual Result EnableApplicationAllThreadDumpOnCrash(bool enabled) override final; - virtual Result TriggerApplicationSnapShotDumper(SnapShotDumpType dump_type, const ams::sf::InBuffer &arg) override final; + Result LaunchProgram(ams::sf::Out out, const ncm::ProgramLocation &loc, u32 pm_flags, u8 pgl_flags); + Result TerminateProcess(os::ProcessId process_id); + Result LaunchProgramFromHost(ams::sf::Out out, const ams::sf::InBuffer &content_path, u32 pm_flags); + Result GetHostContentMetaInfo(ams::sf::Out out, const ams::sf::InBuffer &content_path); + Result GetApplicationProcessId(ams::sf::Out out); + Result BoostSystemMemoryResourceLimit(u64 size); + Result IsProcessTracked(ams::sf::Out out, os::ProcessId process_id); + Result EnableApplicationCrashReport(bool enabled); + Result IsApplicationCrashReportEnabled(ams::sf::Out out); + Result EnableApplicationAllThreadDumpOnCrash(bool enabled); + Result TriggerApplicationSnapShotDumper(SnapShotDumpType dump_type, const ams::sf::InBuffer &arg); - virtual Result GetShellEventObserver(ams::sf::Out> out) override final; + Result GetShellEventObserver(ams::sf::Out> out); }; + static_assert(pgl::sf::IsIShellInterface); } diff --git a/libraries/libstratosphere/include/stratosphere/pm.hpp b/libraries/libstratosphere/include/stratosphere/pm.hpp index 801993fc1..5b252aefb 100644 --- a/libraries/libstratosphere/include/stratosphere/pm.hpp +++ b/libraries/libstratosphere/include/stratosphere/pm.hpp @@ -16,8 +16,12 @@ #pragma once -#include "pm/pm_types.hpp" -#include "pm/pm_boot_mode_api.hpp" -#include "pm/pm_info_api.hpp" -#include "pm/pm_shell_api.hpp" -#include "pm/pm_dmnt_api.hpp" \ No newline at end of file +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/libraries/libstratosphere/include/stratosphere/pm/impl/pm_boot_mode_interface.hpp b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_boot_mode_interface.hpp new file mode 100644 index 000000000..9f37bacc5 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_boot_mode_interface.hpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::pm::impl { + + #define AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, void, GetBootMode, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 1, void, SetMaintenanceBoot, ()) + + AMS_SF_DEFINE_INTERFACE(IBootModeInterface, AMS_PM_I_BOOT_MODE_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/pm/impl/pm_debug_monitor_interface.hpp b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_debug_monitor_interface.hpp new file mode 100644 index 000000000..1e3bfa6d6 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_debug_monitor_interface.hpp @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::pm::impl { + + #define AMS_PM_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetExceptionProcessIdList, (sf::Out out_count, const sf::OutArray &out_process_ids)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, StartProcess, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, GetProcessId, (sf::Out out, ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, HookToCreateProcess, (sf::OutCopyHandle out_hook, ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GetApplicationProcessId, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, HookToCreateApplicationProcess, (sf::OutCopyHandle out_hook)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, ClearHook, (u32 which), hos::Version_6_0_0) \ + AMS_SF_METHOD_INFO(C, H, 65000, Result, AtmosphereGetProcessInfo, (sf::OutCopyHandle out_process_handle, sf::Out out_loc, sf::Out out_status, os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 65001, Result, AtmosphereGetCurrentLimitInfo, (sf::Out out_cur_val, sf::Out out_lim_val, u32 group, u32 resource)) + + AMS_SF_DEFINE_INTERFACE(IDebugMonitorInterface, AMS_PM_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO) + + #define AMS_PM_I_DEPRECATED_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetModuleIdList, (sf::Out out_count, const sf::OutBuffer &out_buf, u64 unused)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, GetExceptionProcessIdList, (sf::Out out_count, const sf::OutArray &out_process_ids)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, StartProcess, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, GetProcessId, (sf::Out out, ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, HookToCreateProcess, (sf::OutCopyHandle out_hook, 
ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, GetApplicationProcessId, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, HookToCreateApplicationProcess, (sf::OutCopyHandle out_hook)) \ + AMS_SF_METHOD_INFO(C, H, 65000, Result, AtmosphereGetProcessInfo, (sf::OutCopyHandle out_process_handle, sf::Out out_loc, sf::Out out_status, os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 65001, Result, AtmosphereGetCurrentLimitInfo, (sf::Out out_cur_val, sf::Out out_lim_val, u32 group, u32 resource)) + + AMS_SF_DEFINE_INTERFACE(IDeprecatedDebugMonitorInterface, AMS_PM_I_DEPRECATED_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/pm/impl/pm_information_interface.hpp b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_information_interface.hpp new file mode 100644 index 000000000..c09b3b208 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_information_interface.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::pm::impl { + + #define AMS_PM_I_INFORMATION_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetProgramId, (sf::Out out, os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 65000, Result, AtmosphereGetProcessId, (sf::Out out, ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 65001, Result, AtmosphereHasLaunchedProgram, (sf::Out out, ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 65002, Result, AtmosphereGetProcessInfo, (sf::Out out_loc, sf::Out out_status, os::ProcessId process_id)) + + AMS_SF_DEFINE_INTERFACE(IInformationInterface, AMS_PM_I_INFORMATION_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/pm/impl/pm_shell_interface.hpp b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_shell_interface.hpp new file mode 100644 index 000000000..1343b7919 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/pm/impl/pm_shell_interface.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include + +namespace ams::pm::impl { + + #define AMS_PM_I_SHELL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, LaunchProgram, (sf::Out out_process_id, const ncm::ProgramLocation &loc, u32 flags)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, TerminateProcess, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, TerminateProgram, (ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 3, void, GetProcessEventHandle, (sf::OutCopyHandle out)) \ + AMS_SF_METHOD_INFO(C, H, 4, void, GetProcessEventInfo, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 5, void, NotifyBootFinished, ()) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, GetApplicationProcessIdForShell, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, BoostSystemMemoryResourceLimit, (u64 boost_size)) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, BoostApplicationThreadResourceLimit, (), hos::Version_7_0_0) \ + AMS_SF_METHOD_INFO(C, H, 9, void, GetBootFinishedEventHandle, (sf::OutCopyHandle out), hos::Version_8_0_0) + + AMS_SF_DEFINE_INTERFACE(IShellInterface, AMS_PM_I_SHELL_INTERFACE_INTERFACE_INFO) + + #define AMS_PM_I_DEPRECATED_SHELL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, LaunchProgram, (sf::Out out_process_id, const ncm::ProgramLocation &loc, u32 flags)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, TerminateProcess, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, TerminateProgram, (ncm::ProgramId program_id)) \ + AMS_SF_METHOD_INFO(C, H, 3, void, GetProcessEventHandle, (sf::OutCopyHandle out)) \ + AMS_SF_METHOD_INFO(C, H, 4, void, GetProcessEventInfo, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, CleanupProcess, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, ClearExceptionOccurred, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 7, void, NotifyBootFinished, ()) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, GetApplicationProcessIdForShell, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, BoostSystemMemoryResourceLimit, (u64 boost_size), hos::Version_4_0_0) + + AMS_SF_DEFINE_INTERFACE(IDeprecatedShellInterface, AMS_PM_I_DEPRECATED_SHELL_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_module.hpp b/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_module.hpp index 5673bd4ab..42fd75d3d 100644 --- a/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_module.hpp +++ b/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_module.hpp @@ -21,30 +21,13 @@ namespace ams::psc::sf { - class IPmModule : public ams::sf::IServiceObject { - protected: - enum class CommandId { - Initialize = 0, - GetRequest = 1, - Acknowledge = 2, - Finalize = 3, - AcknowledgeEx = 4, - }; - public: - /* Actual commands. 
*/ - virtual Result Initialize(ams::sf::OutCopyHandle out, psc::PmModuleId module_id, const ams::sf::InBuffer &child_list) = 0; - virtual Result GetRequest(ams::sf::Out out_state, ams::sf::Out out_flags) = 0; - virtual Result Acknowledge() = 0; - virtual Result Finalize() = 0; - virtual Result AcknowledgeEx(PmState state) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Initialize), - MAKE_SERVICE_COMMAND_META(GetRequest), - MAKE_SERVICE_COMMAND_META(Acknowledge), - MAKE_SERVICE_COMMAND_META(Finalize), - MAKE_SERVICE_COMMAND_META(AcknowledgeEx, hos::Version_5_1_0), - }; - }; + #define AMS_PSC_I_PM_MODULE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Initialize, (ams::sf::OutCopyHandle out, psc::PmModuleId module_id, const ams::sf::InBuffer &child_list)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, GetRequest, (ams::sf::Out out_state, ams::sf::Out out_flags)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, Acknowledge, ()) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, Finalize, ()) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, AcknowledgeEx, (PmState state), hos::Version_5_1_0) + + AMS_SF_DEFINE_INTERFACE(IPmModule, AMS_PSC_I_PM_MODULE_INTERFACE_INFO) } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_service.hpp b/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_service.hpp index f4bf8cc8b..f4dffbeb0 100644 --- a/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_service.hpp +++ b/libraries/libstratosphere/include/stratosphere/psc/sf/psc_sf_i_pm_service.hpp @@ -20,18 +20,9 @@ namespace ams::psc::sf { - class IPmService : public ams::sf::IServiceObject { - protected: - enum class CommandId { - Initialize = 0, - }; - public: - /* Actual commands. */ - virtual Result Initialize(ams::sf::Out> out) = 0; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Initialize), - }; - }; + #define AMS_PSC_I_PM_SERVICE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, Initialize, (ams::sf::Out> out)) + + AMS_SF_DEFINE_INTERFACE(IPmService, AMS_PSC_I_PM_SERVICE_INTERFACE_INFO) } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/ro.hpp b/libraries/libstratosphere/include/stratosphere/ro.hpp index a5972ab3a..d3a695220 100644 --- a/libraries/libstratosphere/include/stratosphere/ro.hpp +++ b/libraries/libstratosphere/include/stratosphere/ro.hpp @@ -16,4 +16,6 @@ #pragma once -#include "ro/ro_types.hpp" +#include +#include +#include diff --git a/libraries/libstratosphere/include/stratosphere/ro/impl/ro_debug_monitor_interface.hpp b/libraries/libstratosphere/include/stratosphere/ro/impl/ro_debug_monitor_interface.hpp new file mode 100644 index 000000000..40e703a8a --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/ro/impl/ro_debug_monitor_interface.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+
+#pragma once
+#include
+#include
+#include
+
+namespace ams::ro::impl {
+
+    #define AMS_RO_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO(C, H) \
+        AMS_SF_METHOD_INFO(C, H, 0, Result, GetProcessModuleInfo, (sf::Out out_count, const sf::OutArray &out_infos, os::ProcessId process_id))
+
+    AMS_SF_DEFINE_INTERFACE(IDebugMonitorInterface, AMS_RO_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO)
+
+}
diff --git a/libraries/libstratosphere/include/stratosphere/ro/impl/ro_ro_interface.hpp b/libraries/libstratosphere/include/stratosphere/ro/impl/ro_ro_interface.hpp
new file mode 100644
index 000000000..624d1e5d8
--- /dev/null
+++ b/libraries/libstratosphere/include/stratosphere/ro/impl/ro_ro_interface.hpp
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see .
+ */
+
+#pragma once
+#include
+#include
+#include
+
+namespace ams::ro::impl {
+
+    #define AMS_RO_I_RO_INTERFACE_INTERFACE_INFO(C, H) \
+        AMS_SF_METHOD_INFO(C, H, 0, Result, MapManualLoadModuleMemory, (sf::Out out_load_address, const sf::ClientProcessId &client_pid, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size)) \
+        AMS_SF_METHOD_INFO(C, H, 1, Result, UnmapManualLoadModuleMemory, (const sf::ClientProcessId &client_pid, u64 nro_address)) \
+        AMS_SF_METHOD_INFO(C, H, 2, Result, RegisterModuleInfo, (const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size)) \
+        AMS_SF_METHOD_INFO(C, H, 3, Result, UnregisterModuleInfo, (const sf::ClientProcessId &client_pid, u64 nrr_address)) \
+        AMS_SF_METHOD_INFO(C, H, 4, Result, RegisterProcessHandle, (const sf::ClientProcessId &client_pid, sf::CopyHandle process_h)) \
+        AMS_SF_METHOD_INFO(C, H, 10, Result, RegisterModuleInfoEx, (const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size, sf::CopyHandle process_h), hos::Version_7_0_0)
+
+    AMS_SF_DEFINE_INTERFACE(IRoInterface, AMS_RO_I_RO_INTERFACE_INTERFACE_INFO)
+
+}
diff --git a/libraries/libstratosphere/include/stratosphere/settings.hpp b/libraries/libstratosphere/include/stratosphere/settings.hpp
index d78e42bef..5b995f595 100644
--- a/libraries/libstratosphere/include/stratosphere/settings.hpp
+++ b/libraries/libstratosphere/include/stratosphere/settings.hpp
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/libraries/libstratosphere/include/stratosphere/settings/system/settings_platform_region.hpp b/libraries/libstratosphere/include/stratosphere/settings/system/settings_platform_region.hpp
new file mode 100644
index 000000000..242358cf0
--- /dev/null
+++ b/libraries/libstratosphere/include/stratosphere/settings/system/settings_platform_region.hpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include + +namespace ams::settings::system { + + enum PlatformRegion { + PlatformRegion_Invalid = 0, + PlatformRegion_Global = 1, + PlatformRegion_China = 2, + }; + + PlatformRegion GetPlatformRegion(); + +} diff --git a/libraries/libstratosphere/include/stratosphere/sf.hpp b/libraries/libstratosphere/include/stratosphere/sf.hpp index 333391f71..77568edf1 100644 --- a/libraries/libstratosphere/include/stratosphere/sf.hpp +++ b/libraries/libstratosphere/include/stratosphere/sf.hpp @@ -28,6 +28,7 @@ #include #include #include +#include #include diff --git a/libraries/libstratosphere/include/stratosphere/sf/cmif/sf_cmif_service_dispatch.hpp b/libraries/libstratosphere/include/stratosphere/sf/cmif/sf_cmif_service_dispatch.hpp index cca03664c..4ee1c3f65 100644 --- a/libraries/libstratosphere/include/stratosphere/sf/cmif/sf_cmif_service_dispatch.hpp +++ b/libraries/libstratosphere/include/stratosphere/sf/cmif/sf_cmif_service_dispatch.hpp @@ -114,7 +114,7 @@ namespace ams::sf::cmif { explicit constexpr ServiceDispatchTable(Entries... entries) : impl::ServiceDispatchTableImpl(entries...) { /* ... */ } }; - #define DEFINE_SERVICE_DISPATCH_TABLE \ + #define AMS_SF_CMIF_IMPL_DEFINE_SERVICE_DISPATCH_TABLE \ template \ static constexpr inline ::ams::sf::cmif::ServiceDispatchTable s_CmifServiceDispatchTable @@ -127,23 +127,21 @@ namespace ams::sf::cmif { } }; - template + template requires sf::IsServiceObject struct ServiceDispatchTraits { - static_assert(std::is_base_of::value, "ServiceObjects must derive from sf::IServiceObject"); - using ProcessHandlerType = decltype(ServiceDispatchMeta::ProcessHandler); static constexpr inline auto DispatchTable = T::template s_CmifServiceDispatchTable; using DispatchTableType = decltype(DispatchTable); - static constexpr ProcessHandlerType ProcessHandlerImpl = ServiceObjectTraits::IsMitmServiceObject ? (&impl::ServiceDispatchTableBase::ProcessMessageForMitm) - : (&impl::ServiceDispatchTableBase::ProcessMessage); + static constexpr ProcessHandlerType ProcessHandlerImpl = sf::IsMitmServiceObject ? 
(&impl::ServiceDispatchTableBase::ProcessMessageForMitm) + : (&impl::ServiceDispatchTableBase::ProcessMessage); static constexpr inline ServiceDispatchMeta Meta{&DispatchTable, ProcessHandlerImpl}; }; template - NX_CONSTEXPR const ServiceDispatchMeta *GetServiceDispatchMeta() { + constexpr ALWAYS_INLINE const ServiceDispatchMeta *GetServiceDispatchMeta() { return &ServiceDispatchTraits::Meta; } diff --git a/libraries/libstratosphere/include/stratosphere/sf/hipc/sf_hipc_server_manager.hpp b/libraries/libstratosphere/include/stratosphere/sf/hipc/sf_hipc_server_manager.hpp index fabff0f93..bb0020129 100644 --- a/libraries/libstratosphere/include/stratosphere/sf/hipc/sf_hipc_server_manager.hpp +++ b/libraries/libstratosphere/include/stratosphere/sf/hipc/sf_hipc_server_manager.hpp @@ -69,12 +69,12 @@ namespace ams::sf::hipc { virtual void CreateSessionObjectHolder(cmif::ServiceObjectHolder *out_obj, std::shared_ptr<::Service> *out_fsrv) const = 0; }; - template> + template class Server : public ServerBase { NON_COPYABLE(Server); NON_MOVEABLE(Server); private: - static constexpr bool IsMitmServer = ServiceObjectTraits::IsMitmServiceObject; + static constexpr bool IsMitmServer = sf::IsMitmServiceObject; public: Server(Handle ph, sm::ServiceName sn, bool m, cmif::ServiceObjectHolder &&sh) : ServerBase(ph, sn, m, std::forward(sh)) { /* ... */ @@ -145,14 +145,14 @@ namespace ams::sf::hipc { void ProcessDeferredSessions(); - template> + template void RegisterServerImpl(Handle port_handle, sm::ServiceName service_name, bool managed, cmif::ServiceObjectHolder &&static_holder) { /* Allocate server memory. */ auto *server = this->AllocateServer(); AMS_ABORT_UNLESS(server != nullptr); - new (server) Server(port_handle, service_name, managed, std::forward(static_holder)); + new (server) Server(port_handle, service_name, managed, std::forward(static_holder)); - if constexpr (!ServiceObjectTraits::IsMitmServiceObject) { + if constexpr (!sf::IsMitmServiceObject) { /* Non-mitm server. */ os::SetWaitableHolderUserData(server, static_cast(UserDataTag::Server)); } else { @@ -163,9 +163,9 @@ namespace ams::sf::hipc { os::LinkWaitableHolder(std::addressof(this->waitable_manager), server); } - template - static constexpr inline std::shared_ptr MakeSharedMitm(std::shared_ptr<::Service> &&s, const sm::MitmProcessInfo &client_info) { - return std::make_shared(std::forward>(s), client_info); + template + static constexpr inline std::shared_ptr MakeSharedMitm(std::shared_ptr<::Service> &&s, const sm::MitmProcessInfo &client_info) { + return sf::MakeShared(std::forward>(s), client_info); } Result InstallMitmServerImpl(Handle *out_port_handle, sm::ServiceName service_name, MitmQueryFunction query_func); @@ -187,21 +187,18 @@ namespace ams::sf::hipc { os::InitializeWaitableManager(std::addressof(this->waitlist)); } - template> - void RegisterServer(Handle port_handle, std::shared_ptr static_object = nullptr) { - static_assert(!ServiceObjectTraits::IsMitmServiceObject, "RegisterServer requires non-mitm object. Use RegisterMitmServer instead."); + template> requires (sf::IsServiceObject && !sf::IsMitmServiceObject) + void RegisterServer(Handle port_handle, std::shared_ptr static_object = nullptr) { /* Register server. 
*/ cmif::ServiceObjectHolder static_holder; if (static_object != nullptr) { static_holder = cmif::ServiceObjectHolder(std::move(static_object)); } - this->RegisterServerImpl(port_handle, sm::InvalidServiceName, false, std::move(static_holder)); + this->RegisterServerImpl(port_handle, sm::InvalidServiceName, false, std::move(static_holder)); } - template> - Result RegisterServer(sm::ServiceName service_name, size_t max_sessions, std::shared_ptr static_object = nullptr) { - static_assert(!ServiceObjectTraits::IsMitmServiceObject, "RegisterServer requires non-mitm object. Use RegisterMitmServer instead."); - + template> requires (sf::IsServiceObject && !sf::IsMitmServiceObject) + Result RegisterServer(sm::ServiceName service_name, size_t max_sessions, std::shared_ptr static_object = nullptr) { /* Register service. */ Handle port_handle; R_TRY(sm::RegisterService(&port_handle, service_name, max_sessions, false)); @@ -211,19 +208,18 @@ namespace ams::sf::hipc { if (static_object != nullptr) { static_holder = cmif::ServiceObjectHolder(std::move(static_object)); } - this->RegisterServerImpl(port_handle, service_name, true, std::move(static_holder)); + this->RegisterServerImpl(port_handle, service_name, true, std::move(static_holder)); return ResultSuccess(); } - template> + template> + requires (sf::IsMitmServiceObject && sf::IsMitmServiceImpl) Result RegisterMitmServer(sm::ServiceName service_name) { - static_assert(ServiceObjectTraits::IsMitmServiceObject, "RegisterMitmServer requires mitm object. Use RegisterServer instead."); - /* Install mitm service. */ Handle port_handle; R_TRY(this->InstallMitmServerImpl(&port_handle, service_name, &ServiceImpl::ShouldMitm)); - this->RegisterServerImpl(port_handle, service_name, true, cmif::ServiceObjectHolder()); + this->RegisterServerImpl(port_handle, service_name, true, cmif::ServiceObjectHolder()); return ResultSuccess(); } diff --git a/libraries/libstratosphere/include/stratosphere/sf/impl/sf_impl_command_serialization.hpp b/libraries/libstratosphere/include/stratosphere/sf/impl/sf_impl_command_serialization.hpp index 2247d6de5..aae4a5d2c 100644 --- a/libraries/libstratosphere/include/stratosphere/sf/impl/sf_impl_command_serialization.hpp +++ b/libraries/libstratosphere/include/stratosphere/sf/impl/sf_impl_command_serialization.hpp @@ -368,22 +368,13 @@ namespace ams::sf::impl { size_t out_object_index; }; - template + template struct CommandMetaInfo { - private: - template - static R GetReturnTypeImpl(R(C::*)(A...)); - - template - static C *GetClassTypePointerImpl(R(C::*)(A...)); - - template - static std::tuple::type...> GetArgsImpl(R(C::*)(A...)); public: - using ReturnType = decltype(GetReturnTypeImpl(MemberFunction)); - using ClassTypePointer = decltype(GetClassTypePointerImpl(MemberFunction)); - using ArgsType = decltype(GetArgsImpl(MemberFunction)); - using ClassType = typename std::remove_pointer::type; + using ReturnType = Return; + using ClassType = Class; + using ClassTypePointer = ClassType *; + using ArgsType = std::tuple::type...>; static constexpr bool ReturnsResult = std::is_same::value; static constexpr bool ReturnsVoid = std::is_same::value; @@ -1045,9 +1036,9 @@ namespace ams::sf::impl { return ResultSuccess(); } - template + template constexpr Result InvokeServiceCommandImpl(CmifOutHeader **out_header_ptr, cmif::ServiceDispatchContext &ctx, const cmif::PointerAndSize &in_raw_data) { - using CommandMeta = CommandMetaInfo; + using CommandMeta = CommandMetaInfo; using ImplProcessorType = HipcCommandProcessor; using BufferArrayType = 
std::array; using OutHandleHolderType = OutHandleHolder; @@ -1113,7 +1104,7 @@ namespace ams::sf::impl { } if constexpr (CommandMeta::ReturnsResult) { - const auto command_result = std::apply([=](auto&&... args) { return (this_ptr->*ServiceCommandImpl)(args...); }, args_tuple); + const auto command_result = std::apply([=](auto&&... args) { return (this_ptr->*ServiceCommandImpl)(std::forward(args)...); }, args_tuple); if (R_FAILED(command_result)) { cmif::PointerAndSize out_raw_data; ctx.processor->PrepareForErrorReply(ctx, out_raw_data, runtime_metadata); @@ -1121,7 +1112,7 @@ namespace ams::sf::impl { return command_result; } } else { - std::apply([=](auto&&... args) { (this_ptr->*ServiceCommandImpl)(args...); }, args_tuple); + std::apply([=](auto&&... args) { (this_ptr->*ServiceCommandImpl)(std::forward(args)...); }, args_tuple); } } @@ -1148,18 +1139,16 @@ namespace ams::sf::impl { } -namespace ams::sf { +namespace ams::sf::impl { - template - inline static constexpr cmif::ServiceCommandMeta MakeServiceCommandMeta() { + template + consteval inline cmif::ServiceCommandMeta MakeServiceCommandMeta() { return { - .hosver_low = Low, + .hosver_low = Low, .hosver_high = High, - .cmd_id = static_cast(CommandId), - .handler = ::ams::sf::impl::InvokeServiceCommandImpl, + .cmd_id = static_cast(CommandId), + .handler = ::ams::sf::impl::InvokeServiceCommandImpl, }; } } - -#define MAKE_SERVICE_COMMAND_META(Name, ...) ::ams::sf::MakeServiceCommandMeta() diff --git a/libraries/libstratosphere/include/stratosphere/sf/impl/sf_impl_service_object_macros.hpp b/libraries/libstratosphere/include/stratosphere/sf/impl/sf_impl_service_object_macros.hpp new file mode 100644 index 000000000..fa1b2feb3 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/sf/impl/sf_impl_service_object_macros.hpp @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include + +namespace ams::sf::impl { + + template + concept ServiceCommandResult = std::same_as<::ams::Result, T> || std::same_as; + + template + concept Invokable = requires (Arguments &&... args) { + { F(std::forward(args)...) 
}; + }; + + struct FunctionTraits { + public: + template + static std::tuple GetArgsImpl(R(A...)); + }; + + template + using FunctionArgsType = decltype(FunctionTraits::GetArgsImpl(F)); + + template + struct ClassFunctionPointerHelper; + + template + struct ClassFunctionPointerHelper> { + using Type = Return (*)(Class *, Arguments &&...); + }; + + template + using ClassFunctionPointer = typename ClassFunctionPointerHelper::Type; + + template + struct TypeTag{}; + + #define AMS_SF_IMPL_HELPER_FUNCTION_NAME_IMPL(CLASSNAME, FUNCNAME, SUFFIX) \ + __ams_sf_impl_helper_##CLASSNAME##_##FUNCNAME##_##SUFFIX + + #define AMS_SF_IMPL_HELPER_FUNCTION_NAME(CLASSNAME, FUNCNAME) \ + AMS_SF_IMPL_HELPER_FUNCTION_NAME_IMPL(CLASSNAME, FUNCNAME, intf) + + #define AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, FUNCNAME) \ + ::ams::sf::impl::FunctionArgsType + + #define AMS_SF_IMPL_CONCEPT_HELPER_FUNCTION_NAME(CLASSNAME, FUNCNAME) \ + AMS_SF_IMPL_HELPER_FUNCTION_NAME_IMPL(CLASSNAME, FUNCNAME, intf_for_concept) + + #define AMS_SF_IMPL_DECLARE_HELPER_FUNCTIONS(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + static void AMS_SF_IMPL_HELPER_FUNCTION_NAME(CLASSNAME, NAME) ARGS { __builtin_unreachable(); } \ + template \ + requires std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> \ + static auto AMS_SF_IMPL_CONCEPT_HELPER_FUNCTION_NAME(CLASSNAME, NAME) (T &t, std::tuple a) { \ + return [&](std::index_sequence) { \ + return t.NAME(std::forward::type>(std::get(a))...); \ + }(std::make_index_sequence()); \ + } + + #define AMS_SF_IMPL_DECLARE_HELPERS(CLASSNAME, CMD_MACRO) \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_HELPER_FUNCTIONS) + + #define AMS_SF_IMPL_DECLARE_CONCEPT_REQUIREMENT(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + { AMS_SF_IMPL_CONCEPT_HELPER_FUNCTION_NAME(CLASSNAME, NAME) (impl, std::declval()) } -> ::ams::sf::impl::ServiceCommandResult; + + #define AMS_SF_IMPL_DEFINE_CONCEPT(CLASSNAME, CMD_MACRO) \ + template \ + concept Is##CLASSNAME = requires (Impl &impl) { \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_CONCEPT_REQUIREMENT) \ + }; + + #define AMS_SF_IMPL_FUNCTION_POINTER_TYPE(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + ::ams::sf::impl::ClassFunctionPointer + + #define AMS_SF_IMPL_DECLARE_FUNCTION_POINTER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + AMS_SF_IMPL_FUNCTION_POINTER_TYPE(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) NAME; + + #define AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template \ + requires std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> \ + RETURN Invoke##NAME##ByCommandTable (Arguments &&... args) { \ + return this->cmd_table->NAME(this, std::forward(args)...); \ + } \ + template \ + requires (::ams::sf::impl::Invokable && \ + std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)>) \ + ALWAYS_INLINE RETURN NAME (Arguments &&... args) { \ + return this->Invoke##NAME##ByCommandTable(std::forward(args)...); \ + } \ + template \ + requires (::ams::sf::impl::Invokable && \ + !std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)>) \ + ALWAYS_INLINE RETURN NAME (Arguments &&... 
args) { \ + return [this] ALWAYS_INLINE_LAMBDA (::ams::sf::impl::TypeTag>, PassedArguments &&...args_) -> RETURN { \ + return this->template NAME(std::forward(args_)...); \ + }(::ams::sf::impl::TypeTag{}, std::forward(args)...); \ + } + + #define AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_INVOKER_HOLDER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template \ + requires std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> \ + static RETURN NAME##Invoker (CLASSNAME *_this, Arguments &&... args) { \ + return static_cast(_this)->NAME(std::forward(args)...); \ + } + + #define AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_INVOKER_POINTER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template \ + requires std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> \ + static RETURN NAME##Invoker (CLASSNAME *_this, Arguments &&... args) { \ + return static_cast(_this)->NAME(std::forward(args)...); \ + } + + #define AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_INVOKER_SHARED_POINTER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template \ + requires std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> \ + static RETURN NAME##Invoker (CLASSNAME *_this, Arguments &&... args) { \ + return static_cast(_this)->NAME(std::forward(args)...); \ + } + + #define AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_IMPL(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template \ + requires (std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> && \ + std::same_as()...))>) \ + RETURN NAME (Arguments &&... args) { \ + return this->impl.NAME(std::forward(args)...); \ + } + + #define AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_IMPL_PTR(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template \ + requires (std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> && \ + std::same_asNAME(std::declval()...))>) \ + RETURN NAME (Arguments &&... 
args) { \ + return this->impl->NAME(std::forward(args)...); \ + } + + #define AMS_SF_IMPL_DEFINE_INTERFACE_IMPL_FUNCTION_POINTER_HOLDER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template struct NAME##FunctionPointerHolder; \ + \ + template \ + requires std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> \ + struct NAME##FunctionPointerHolder> { \ + static constexpr auto Value = static_cast(&NAME##Invoker); \ + }; + + #define AMS_SF_IMPL_DEFINE_INTERFACE_SERVICE_COMMAND_META_HOLDER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + template struct NAME##ServiceCommandMetaHolder; \ + \ + template \ + requires std::same_as, AMS_SF_IMPL_HELPER_FUNCTION_ARGS(CLASSNAME, NAME)> \ + struct NAME##ServiceCommandMetaHolder> { \ + static constexpr auto Value = ::ams::sf::impl::MakeServiceCommandMeta, RETURN, CLASSNAME, Arguments...>(); \ + }; + + #define AMS_SF_IMPL_DEFINE_INTERFACE_COMMAND_POINTER_TABLE_MEMBER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + .NAME = NAME##FunctionPointerHolder::Value, + + #define AMS_SF_IMPL_DEFINE_CMIF_SERVICE_COMMAND_META_TABLE_ENTRY(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + NAME##ServiceCommandMetaHolder::Value, + + template + struct Print; + + #define AMS_SF_IMPL_DEFINE_CLASS(BASECLASS, CLASSNAME, CMD_MACRO) \ + class CLASSNAME : public BASECLASS { \ + NON_COPYABLE(CLASSNAME); \ + NON_MOVEABLE(CLASSNAME); \ + private: \ + struct CommandPointerTable { \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_FUNCTION_POINTER) \ + }; \ + private: \ + const CommandPointerTable * const cmd_table; \ + private: \ + CLASSNAME() = delete; \ + protected: \ + constexpr CLASSNAME(const CommandPointerTable *ct) \ + : cmd_table(ct) { /* ... */ } \ + virtual ~CLASSNAME() { /* ... */ } \ + public: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION) \ + private: \ + template \ + requires ((std::same_as && !std::same_as&& Is##CLASSNAME) && \ + (::ams::sf::IsMitmServiceObject == ::ams::sf::IsMitmServiceImpl)) \ + struct ImplGenerator { \ + public: \ + class ImplHolder : public S { \ + private: \ + T impl; \ + public: \ + template requires std::constructible_from \ + constexpr ImplHolder(Args &&... args) \ + : S(std::addressof(CommandPointerTableImpl)), impl(std::forward(args)...) \ + { \ + /* ... */ \ + } \ + ALWAYS_INLINE T &GetImpl() { return this->impl; } \ + ALWAYS_INLINE const T &GetImpl() const { return this->impl; } \ + \ + template requires ::ams::sf::IsMitmServiceObject && std::same_as \ + static ALWAYS_INLINE bool ShouldMitm(os::ProcessId p, ncm::ProgramId r) { return T::ShouldMitm(p, r); } \ + private: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_INVOKER_HOLDER) \ + public: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_IMPL) \ + private: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_INTERFACE_IMPL_FUNCTION_POINTER_HOLDER) \ + public: \ + static constexpr CommandPointerTable CommandPointerTableImpl = { \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_INTERFACE_COMMAND_POINTER_TABLE_MEMBER) \ + }; \ + }; \ + static_assert(Is##CLASSNAME); \ + \ + class ImplPointer : public S { \ + private: \ + T *impl; \ + public: \ + constexpr ImplPointer(T *t) \ + : S(std::addressof(CommandPointerTableImpl)), impl(t) \ + { \ + /* ... 
*/ \ + } \ + ALWAYS_INLINE T &GetImpl() { return *this->impl; } \ + ALWAYS_INLINE const T &GetImpl() const { return *this->impl; } \ + private: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_INVOKER_POINTER) \ + public: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_IMPL_PTR) \ + private: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_INTERFACE_IMPL_FUNCTION_POINTER_HOLDER) \ + public: \ + static constexpr CommandPointerTable CommandPointerTableImpl = { \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_INTERFACE_COMMAND_POINTER_TABLE_MEMBER) \ + }; \ + }; \ + \ + class ImplSharedPointer : public S { \ + private: \ + std::shared_ptr impl; \ + public: \ + constexpr ImplSharedPointer(std::shared_ptr &&t) \ + : S(std::addressof(CommandPointerTableImpl)), impl(std::move(t)) \ + { \ + /* ... */ \ + } \ + ALWAYS_INLINE T &GetImpl() { return *this->impl; } \ + ALWAYS_INLINE const T &GetImpl() const { return *this->impl; } \ + private: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_INVOKER_SHARED_POINTER) \ + public: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DECLARE_INTERFACE_FUNCTION_IMPL_PTR) \ + private: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_INTERFACE_IMPL_FUNCTION_POINTER_HOLDER) \ + public: \ + static constexpr CommandPointerTable CommandPointerTableImpl = { \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_INTERFACE_COMMAND_POINTER_TABLE_MEMBER) \ + }; \ + }; \ + static_assert(Is##CLASSNAME); \ + }; \ + private: \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_INTERFACE_SERVICE_COMMAND_META_HOLDER) \ + public: \ + template requires (!std::same_as&& Is##CLASSNAME) \ + using ImplHolder = typename ImplGenerator::ImplHolder; \ + \ + template requires (!std::same_as&& Is##CLASSNAME) \ + using ImplPointer = typename ImplGenerator::ImplPointer; \ + \ + template requires (!std::same_as&& Is##CLASSNAME && \ + std::derived_from>) \ + using ImplSharedPointer = typename ImplGenerator::ImplSharedPointer; \ + \ + AMS_SF_CMIF_IMPL_DEFINE_SERVICE_DISPATCH_TABLE { \ + CMD_MACRO(CLASSNAME, AMS_SF_IMPL_DEFINE_CMIF_SERVICE_COMMAND_META_TABLE_ENTRY) \ + }; \ + }; + + #define AMS_SF_METHOD_INFO_6(CLASSNAME, HANDLER, CMD_ID, RETURN, NAME, ARGS) \ + HANDLER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, hos::Version_Min, hos::Version_Max) + + #define AMS_SF_METHOD_INFO_7(CLASSNAME, HANDLER, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN) \ + HANDLER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, hos::Version_Max) + + #define AMS_SF_METHOD_INFO_8(CLASSNAME, HANDLER, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + HANDLER(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) + + #define AMS_SF_METHOD_INFO_X(_, _0, _1, _2, _3, _4, _5, _6, _7, FUNC, ...) FUNC + + #define AMS_SF_METHOD_INFO(...) 
\ + AMS_SF_METHOD_INFO_X(, ## __VA_ARGS__, AMS_SF_METHOD_INFO_8(__VA_ARGS__), AMS_SF_METHOD_INFO_7(__VA_ARGS__), AMS_SF_METHOD_INFO_6(__VA_ARGS__)) + + #define AMS_SF_DEFINE_INTERFACE(CLASSNAME, CMD_MACRO) \ + AMS_SF_IMPL_DECLARE_HELPERS(CLASSNAME,CMD_MACRO) \ + AMS_SF_IMPL_DEFINE_CONCEPT(CLASSNAME, CMD_MACRO) \ + AMS_SF_IMPL_DEFINE_CLASS( ::ams::sf::IServiceObject, CLASSNAME, CMD_MACRO) \ + static_assert(Is##CLASSNAME); + + #define AMS_SF_DEFINE_MITM_INTERFACE(CLASSNAME, CMD_MACRO) \ + AMS_SF_IMPL_DECLARE_HELPERS(CLASSNAME,CMD_MACRO) \ + AMS_SF_IMPL_DEFINE_CONCEPT(CLASSNAME, CMD_MACRO) \ + AMS_SF_IMPL_DEFINE_CLASS(::ams::sf::IMitmServiceObject, CLASSNAME, CMD_MACRO) \ + static_assert(Is##CLASSNAME); + + #define AMS_SF_IMPL_DECLARE_INTERFACE_METHODS(CLASSNAME, CMD_ID, RETURN, NAME, ARGS, VERSION_MIN, VERSION_MAX) \ + RETURN NAME ARGS; + + #define AMS_SF_DECLARE_INTERFACE_METHODS(CMD_MACRO) \ + CMD_MACRO(_, AMS_SF_IMPL_DECLARE_INTERFACE_METHODS) + +} \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/sf/sf_buffer_tags.hpp b/libraries/libstratosphere/include/stratosphere/sf/sf_buffer_tags.hpp index 985a42775..4467e1776 100644 --- a/libraries/libstratosphere/include/stratosphere/sf/sf_buffer_tags.hpp +++ b/libraries/libstratosphere/include/stratosphere/sf/sf_buffer_tags.hpp @@ -13,7 +13,6 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ - #pragma once namespace ams::sf { diff --git a/libraries/libstratosphere/include/stratosphere/sf/sf_fs_inline_context.hpp b/libraries/libstratosphere/include/stratosphere/sf/sf_fs_inline_context.hpp index 376246dfc..11d0aac6d 100644 --- a/libraries/libstratosphere/include/stratosphere/sf/sf_fs_inline_context.hpp +++ b/libraries/libstratosphere/include/stratosphere/sf/sf_fs_inline_context.hpp @@ -17,9 +17,15 @@ #pragma once #include -namespace ams::sf { +namespace ams::os { - u8 GetFsInlineContext(); - u8 SetFsInlineContext(u8 ctx); + struct ThreadType; + +} + +namespace ams::sf { + + u8 GetFsInlineContext(os::ThreadType *thread); + u8 SetFsInlineContext(os::ThreadType *thread, u8 ctx); } diff --git a/libraries/libstratosphere/include/stratosphere/sf/sf_service_object.hpp b/libraries/libstratosphere/include/stratosphere/sf/sf_service_object.hpp index 70b93ec6f..bee400c81 100644 --- a/libraries/libstratosphere/include/stratosphere/sf/sf_service_object.hpp +++ b/libraries/libstratosphere/include/stratosphere/sf/sf_service_object.hpp @@ -25,38 +25,51 @@ namespace ams::sf { virtual ~IServiceObject() { /* ... */ } }; + template + concept IsServiceObject = std::derived_from; + class IMitmServiceObject : public IServiceObject { + public: + virtual ~IMitmServiceObject() { /* ... */ } + }; + + class MitmServiceImplBase { protected: std::shared_ptr<::Service> forward_service; sm::MitmProcessInfo client_info; public: - IMitmServiceObject(std::shared_ptr<::Service> &&s, const sm::MitmProcessInfo &c) : forward_service(std::move(s)), client_info(c) { /* ... */ } - - virtual ~IMitmServiceObject() { /* ... */ } - - static bool ShouldMitm(os::ProcessId process_id, ncm::ProgramId program_id); + MitmServiceImplBase(std::shared_ptr<::Service> &&s, const sm::MitmProcessInfo &c) : forward_service(std::move(s)), client_info(c) { /* ... */ } }; - /* Utility. 
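For reference, the interface-definition macros introduced above are consumed roughly as follows. This is a minimal sketch with made-up names (AMS_EXAMPLE_I_COUNTER_INTERFACE_INFO, ICounter, CounterImpl, the command ids) rather than code from this diff, and it assumes it is written inside an ams:: namespace with the sf headers included:

    /* Describe the interface as a table of (command id, return type, name, argument list). */
    #define AMS_EXAMPLE_I_COUNTER_INTERFACE_INFO(C, H)                              \
        AMS_SF_METHOD_INFO(C, H, 0, Result, Increment, (sf::Out<u32> out_count))    \
        AMS_SF_METHOD_INFO(C, H, 1, Result, Reset,     ())

    /* Generates the ICounter service-object class plus an IsICounter concept. */
    AMS_SF_DEFINE_INTERFACE(ICounter, AMS_EXAMPLE_I_COUNTER_INTERFACE_INFO)

    /* Any plain class whose member functions match the table satisfies the concept; */
    /* it does not need to inherit from a service-object base class. */
    class CounterImpl {
        private:
            u32 count = 0;
        public:
            Result Increment(sf::Out<u32> out_count) { out_count.SetValue(++this->count); return ResultSuccess(); }
            Result Reset() { this->count = 0; return ResultSuccess(); }
    };
    static_assert(IsICounter<CounterImpl>);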
*/ - #define SF_MITM_SERVICE_OBJECT_CTOR(cls) cls(std::shared_ptr<::Service> &&s, const sm::MitmProcessInfo &c) : ::ams::sf::IMitmServiceObject(std::forward>(s), c) - template - struct ServiceObjectTraits { - static_assert(std::is_base_of::value, "ServiceObjectTraits requires ServiceObject"); + concept IsMitmServiceObject = IsServiceObject && std::derived_from; - static constexpr bool IsMitmServiceObject = std::is_base_of::value; - - struct SharedPointerHelper { - - static constexpr void EmptyDelete(T *) { /* Empty deleter, for fake shared pointer. */ } - - static constexpr std::shared_ptr GetEmptyDeleteSharedPointer(T *srv_obj) { - return std::shared_ptr(srv_obj, EmptyDelete); - } - - }; + template + concept IsMitmServiceImpl = requires (std::shared_ptr<::Service> &&s, const sm::MitmProcessInfo &c) { + { T(std::forward>(s), c) }; + { T::ShouldMitm(c) } -> std::same_as; }; + template + requires std::constructible_from + constexpr ALWAYS_INLINE std::shared_ptr> MakeShared(Arguments &&... args) { + return std::make_shared>(std::forward(args)...); + } + template + requires (std::constructible_from && std::derived_from>) + constexpr ALWAYS_INLINE std::shared_ptr> MakeShared(Arguments &&... args) { + return std::make_shared>(std::make_shared(std::forward(args)...)); + } + + template + constexpr ALWAYS_INLINE std::shared_ptr> GetSharedPointerTo(Impl *impl) { + return std::make_shared>(impl); + } + + template + constexpr ALWAYS_INLINE std::shared_ptr> GetSharedPointerTo(Impl &impl) { + return GetSharedPointerTo(std::addressof(impl)); + } } \ No newline at end of file diff --git a/libraries/libstratosphere/include/stratosphere/sm.hpp b/libraries/libstratosphere/include/stratosphere/sm.hpp index d5c4456d1..98e3f83ac 100644 --- a/libraries/libstratosphere/include/stratosphere/sm.hpp +++ b/libraries/libstratosphere/include/stratosphere/sm.hpp @@ -16,9 +16,13 @@ #pragma once -#include "sm/sm_types.hpp" -#include "sm/sm_api.hpp" -#include "sm/sm_mitm_api.hpp" -#include "sm/sm_scoped_holder.hpp" +#include +#include +#include +#include -#include "sm/sm_manager_api.hpp" +#include + +#include +#include +#include diff --git a/libraries/libstratosphere/include/stratosphere/sm/impl/sm_debug_monitor_interface.hpp b/libraries/libstratosphere/include/stratosphere/sm/impl/sm_debug_monitor_interface.hpp new file mode 100644 index 000000000..af79293c8 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/sm/impl/sm_debug_monitor_interface.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
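Continuing the hypothetical ICounter/CounterImpl example from above, the new MakeShared/GetSharedPointerTo helpers in sf_service_object.hpp would be used roughly as sketched below (names and call sites are illustrative, not code from this diff):

    /* Owning wrapper: constructs a CounterImpl inside the returned shared object. */
    auto owned = sf::MakeShared<ICounter, CounterImpl>();

    /* Non-owning wrapper around an implementation whose lifetime is managed elsewhere; */
    /* this replaces the old ServiceObjectTraits<...>::SharedPointerHelper empty-deleter idiom. */
    CounterImpl g_counter_impl;
    auto unowned = sf::GetSharedPointerTo<ICounter>(g_counter_impl);

Both results are usable where a shared pointer to the interface is expected, e.g. when registering a server, as in the capsrv change later in this diff.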
+ */ + +#pragma once +#include +#include +#include + +namespace ams::sm::impl { + + #define AMS_SM_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 65000, Result, AtmosphereGetRecord, (sf::Out record, ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65001, void, AtmosphereListRecords, (const sf::OutArray &records, sf::Out out_count, u64 offset)) \ + AMS_SF_METHOD_INFO(C, H, 65002, void, AtmosphereGetRecordSize, (sf::Out record_size)) + + AMS_SF_DEFINE_INTERFACE(IDebugMonitorInterface, AMS_SM_I_DEBUG_MONITOR_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/sm/impl/sm_manager_interface.hpp b/libraries/libstratosphere/include/stratosphere/sm/impl/sm_manager_interface.hpp new file mode 100644 index 000000000..6037bafe0 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/sm/impl/sm_manager_interface.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include + +namespace ams::sm::impl { + + #define AMS_SM_I_MANAGER_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, RegisterProcess, (os::ProcessId process_id, const sf::InBuffer &acid_sac, const sf::InBuffer &aci_sac)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, UnregisterProcess, (os::ProcessId process_id)) \ + AMS_SF_METHOD_INFO(C, H, 65000, void, AtmosphereEndInitDefers, ()) \ + AMS_SF_METHOD_INFO(C, H, 65001, void, AtmosphereHasMitm, (sf::Out out, ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65002, Result, AtmosphereRegisterProcess, (os::ProcessId process_id, ncm::ProgramId program_id, cfg::OverrideStatus override_status, const sf::InBuffer &acid_sac, const sf::InBuffer &aci_sac)) + + AMS_SF_DEFINE_INTERFACE(IManagerInterface, AMS_SM_I_MANAGER_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/sm/impl/sm_user_interface.hpp b/libraries/libstratosphere/include/stratosphere/sm/impl/sm_user_interface.hpp new file mode 100644 index 000000000..843162857 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/sm/impl/sm_user_interface.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include + +namespace ams::sm::impl { + + #define AMS_SM_I_USER_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, RegisterClient, (const sf::ClientProcessId &client_process_id)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, GetServiceHandle, (sf::OutMoveHandle out_h, ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, RegisterService, (sf::OutMoveHandle out_h, ServiceName service, u32 max_sessions, bool is_light)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, UnregisterService, (ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65000, Result, AtmosphereInstallMitm, (sf::OutMoveHandle srv_h, sf::OutMoveHandle qry_h, ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65001, Result, AtmosphereUninstallMitm, (ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65003, Result, AtmosphereAcknowledgeMitmSession, (sf::Out client_info, sf::OutMoveHandle fwd_h, ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65004, Result, AtmosphereHasMitm, (sf::Out out, ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65005, Result, AtmosphereWaitMitm, (ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65006, Result, AtmosphereDeclareFutureMitm, (ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65100, Result, AtmosphereHasService, (sf::Out out, ServiceName service)) \ + AMS_SF_METHOD_INFO(C, H, 65101, Result, AtmosphereWaitService, (ServiceName service)) + + AMS_SF_DEFINE_INTERFACE(IUserInterface, AMS_SM_I_USER_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl.hpp b/libraries/libstratosphere/include/stratosphere/spl.hpp index d8b7518f6..e9ec412fc 100644 --- a/libraries/libstratosphere/include/stratosphere/spl.hpp +++ b/libraries/libstratosphere/include/stratosphere/spl.hpp @@ -16,6 +16,15 @@ #pragma once -#include "spl/spl_types.hpp" -#include "spl/spl_api.hpp" -#include "spl/smc/spl_smc.hpp" \ No newline at end of file +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_crypto_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_crypto_interface.hpp new file mode 100644 index 000000000..d3e68b95c --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_crypto_interface.hpp @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
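For illustration, a server-side implementation of one of these generated interfaces only needs member functions matching the table; the AMS_SF_DECLARE_INTERFACE_METHODS helper from the macro header can stamp out the declarations. A hedged sketch (the class name UserServiceImpl is made up, and sm's real implementation may be organized differently):

    namespace ams::sm {

        class UserServiceImpl {
            public:
                /* Declares RegisterClient, GetServiceHandle, RegisterService, ..., with the */
                /* exact signatures listed in AMS_SM_I_USER_INTERFACE_INTERFACE_INFO. */
                AMS_SF_DECLARE_INTERFACE_METHODS(AMS_SM_I_USER_INTERFACE_INTERFACE_INFO)
        };
        static_assert(impl::IsIUserInterface<UserServiceImpl>);

    }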
+ */ + +#pragma once +#include +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_CRYPTO_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SPL_I_GENERAL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, GenerateAesKek, (sf::Out out_access_key, KeySource key_source, u32 generation, u32 option)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, LoadAesKey, (s32 keyslot, AccessKey access_key, KeySource key_source)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GenerateAesKey, (sf::Out out_key, AccessKey access_key, KeySource key_source)) \ + AMS_SF_METHOD_INFO(C, H, 14, Result, DecryptAesKey, (sf::Out out_key, KeySource key_source, u32 generation, u32 option)) \ + AMS_SF_METHOD_INFO(C, H, 15, Result, ComputeCtr, (const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr)) \ + AMS_SF_METHOD_INFO(C, H, 16, Result, ComputeCmac, (sf::Out out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf)) \ + AMS_SF_METHOD_INFO(C, H, 21, Result, AllocateAesKeySlot, (sf::Out out_keyslot)) \ + AMS_SF_METHOD_INFO(C, H, 22, Result, DeallocateAesKeySlot, (s32 keyslot)) \ + AMS_SF_METHOD_INFO(C, H, 23, Result, GetAesKeySlotAvailableEvent, (sf::OutCopyHandle out_hnd)) + + AMS_SF_DEFINE_INTERFACE(ICryptoInterface, AMS_SPL_I_CRYPTO_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_deprecated_general_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_deprecated_general_interface.hpp new file mode 100644 index 000000000..3c9eb09c5 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_deprecated_general_interface.hpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_DEPRECATED_GENERAL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetConfig, (sf::Out out, u32 which)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, ModularExponentiate, (const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, GenerateAesKek, (sf::Out out_access_key, KeySource key_source, u32 generation, u32 option)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, LoadAesKey, (s32 keyslot, AccessKey access_key, KeySource key_source)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GenerateAesKey, (sf::Out out_key, AccessKey access_key, KeySource key_source)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, SetConfig, (u32 which, u64 value)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, GenerateRandomBytes, (const sf::OutPointerBuffer &out)) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, DecryptAndStoreGcKey, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option)) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, DecryptGcMessage, (sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest)) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, IsDevelopment, (sf::Out is_dev)) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, GenerateSpecificAesKey, (sf::Out out_key, KeySource key_source, u32 generation, u32 which)) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, DecryptDeviceUniqueData, (const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option)) \ + AMS_SF_METHOD_INFO(C, H, 14, Result, DecryptAesKey, (sf::Out out_key, KeySource key_source, u32 generation, u32 option)) \ + AMS_SF_METHOD_INFO(C, H, 15, Result, ComputeCtrDeprecated, (const sf::OutBuffer &out_buf, s32 keyslot, const sf::InBuffer &in_buf, IvCtr iv_ctr), hos::Version_1_0_0, hos::Version_1_0_0) \ + AMS_SF_METHOD_INFO(C, H, 15, Result, ComputeCtr, (const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 16, Result, ComputeCmac, (sf::Out out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf)) \ + AMS_SF_METHOD_INFO(C, H, 17, Result, LoadEsDeviceKey, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option)) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, PrepareEsTitleKeyDeprecated, (sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest), hos::Version_1_0_0, hos::Version_2_3_0) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, PrepareEsTitleKey, (sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 19, Result, LoadPreparedAesKey, (s32 keyslot, AccessKey access_key)) \ + AMS_SF_METHOD_INFO(C, H, 20, Result, PrepareCommonEsTitleKeyDeprecated, (sf::Out out_access_key, KeySource key_source), hos::Version_2_0_0, hos::Version_2_3_0) \ + AMS_SF_METHOD_INFO(C, H, 20, Result, PrepareCommonEsTitleKey, (sf::Out out_access_key, KeySource key_source, u32 generation), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 21, Result, AllocateAesKeySlot, (sf::Out out_keyslot)) \ + AMS_SF_METHOD_INFO(C, H, 22, Result, DeallocateAesKeySlot, (s32 keyslot)) \ + AMS_SF_METHOD_INFO(C, 
H, 23, Result, GetAesKeySlotAvailableEvent, (sf::OutCopyHandle out_hnd)) \ + AMS_SF_METHOD_INFO(C, H, 24, Result, SetBootReason, (spl::BootReasonValue boot_reason), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 25, Result, GetBootReason, (sf::Out out), hos::Version_3_0_0) + + AMS_SF_DEFINE_INTERFACE(IDeprecatedGeneralInterface, AMS_SPL_I_DEPRECATED_GENERAL_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_device_unique_data_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_device_unique_data_interface.hpp new file mode 100644 index 000000000..307f238ce --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_device_unique_data_interface.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_DEVICE_UNIQUE_DATA_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SPL_I_CRYPTO_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, DecryptDeviceUniqueDataDeprecated, (const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option), hos::Version_Min, hos::Version_4_1_0) \ + AMS_SF_METHOD_INFO(C, H, 13, Result, DecryptDeviceUniqueData, (const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source), hos::Version_5_0_0) + + AMS_SF_DEFINE_INTERFACE(IDeviceUniqueDataInterface, AMS_SPL_I_DEVICE_UNIQUE_DATA_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_es_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_es_interface.hpp new file mode 100644 index 000000000..e29ddc743 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_es_interface.hpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
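Note the composition pattern used by the spl headers above: a derived interface's _INTERFACE_INFO macro first re-expands the base interface's macro and then appends its own entries, so the generated dispatch table is the union of both. A minimal sketch with hypothetical names:

    #define AMS_EXAMPLE_I_BASE_INFO(C, H)                                     \
        AMS_SF_METHOD_INFO(C, H, 0, Result, GetVersion, (sf::Out<u32> out))

    /* The derived table inherits the base commands, then adds its own. */
    #define AMS_EXAMPLE_I_DERIVED_INFO(C, H)                                  \
        AMS_EXAMPLE_I_BASE_INFO(C, H)                                         \
        AMS_SF_METHOD_INFO(C, H, 1, Result, DoDerivedThing, ())

    /* IDerivedExample dispatches both command 0 and command 1. */
    AMS_SF_DEFINE_INTERFACE(IDerivedExample, AMS_EXAMPLE_I_DERIVED_INFO)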
+ */ + +#pragma once +#include +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_ES_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SPL_I_DEVICE_UNIQUE_DATA_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 17, Result, LoadEsDeviceKeyDeprecated, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option), hos::Version_Min, hos::Version_4_1_0) \ + AMS_SF_METHOD_INFO(C, H, 17, Result, LoadEsDeviceKey, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, PrepareEsTitleKey, (sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation)) \ + AMS_SF_METHOD_INFO(C, H, 20, Result, PrepareCommonEsTitleKey, (sf::Out out_access_key, KeySource key_source, u32 generation), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 28, Result, DecryptAndStoreDrmDeviceCertKey, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 29, Result, ModularExponentiateWithDrmDeviceCertKey, (const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 31, Result, PrepareEsArchiveKey, (sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation), hos::Version_6_0_0) \ + AMS_SF_METHOD_INFO(C, H, 32, Result, LoadPreparedAesKey, (s32 keyslot, AccessKey access_key), hos::Version_6_0_0) + + AMS_SF_DEFINE_INTERFACE(IEsInterface, AMS_SPL_I_ES_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_fs_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_fs_interface.hpp new file mode 100644 index 000000000..d487280ec --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_fs_interface.hpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_FS_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SPL_I_CRYPTO_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, DecryptAndStoreGcKeyDeprecated, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option), hos::Version_Min, hos::Version_4_1_0) \ + AMS_SF_METHOD_INFO(C, H, 9, Result, DecryptAndStoreGcKey, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 10, Result, DecryptGcMessage, (sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest)) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, GenerateSpecificAesKey, (sf::Out out_key, KeySource key_source, u32 generation, u32 which)) \ + AMS_SF_METHOD_INFO(C, H, 19, Result, LoadPreparedAesKey, (s32 keyslot, AccessKey access_key)) \ + AMS_SF_METHOD_INFO(C, H, 31, Result, GetPackage2Hash, (const sf::OutPointerBuffer &dst), hos::Version_5_0_0) + + AMS_SF_DEFINE_INTERFACE(IFsInterface, AMS_SPL_I_FS_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_general_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_general_interface.hpp new file mode 100644 index 000000000..28f596b94 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_general_interface.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_GENERAL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetConfig, (sf::Out out, u32 which)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, ModularExponentiate, (const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, SetConfig, (u32 which, u64 value)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, GenerateRandomBytes, (const sf::OutPointerBuffer &out)) \ + AMS_SF_METHOD_INFO(C, H, 11, Result, IsDevelopment, (sf::Out is_dev)) \ + AMS_SF_METHOD_INFO(C, H, 24, Result, SetBootReason, (spl::BootReasonValue boot_reason), hos::Version_3_0_0) \ + AMS_SF_METHOD_INFO(C, H, 25, Result, GetBootReason, (sf::Out out), hos::Version_3_0_0) + + AMS_SF_DEFINE_INTERFACE(IGeneralInterface, AMS_SPL_I_GENERAL_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_manu_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_manu_interface.hpp new file mode 100644 index 000000000..5e9695dde --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_manu_interface.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_MANU_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SPL_I_DEVICE_UNIQUE_DATA_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 30, Result, ReencryptDeviceUniqueData, (const sf::OutPointerBuffer &out, const sf::InPointerBuffer &src, AccessKey access_key_dec, KeySource source_dec, AccessKey access_key_enc, KeySource source_enc, u32 option)) \ + + AMS_SF_DEFINE_INTERFACE(IManuInterface, AMS_SPL_I_MANU_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_random_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_random_interface.hpp new file mode 100644 index 000000000..6181ccc30 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_random_interface.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_RANDOM_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GenerateRandomBytes, (const sf::OutBuffer &out)) + + AMS_SF_DEFINE_INTERFACE(IRandomInterface, AMS_SPL_I_RANDOM_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/impl/spl_ssl_interface.hpp b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_ssl_interface.hpp new file mode 100644 index 000000000..3df104ef3 --- /dev/null +++ b/libraries/libstratosphere/include/stratosphere/spl/impl/spl_ssl_interface.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include +#include +#include +#include + +namespace ams::spl::impl { + + #define AMS_SPL_I_SSL_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SPL_I_DEVICE_UNIQUE_DATA_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 26, Result, DecryptAndStoreSslClientCertKey, (const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source), hos::Version_5_0_0) \ + AMS_SF_METHOD_INFO(C, H, 27, Result, ModularExponentiateWithSslClientCertKey, (const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod), hos::Version_5_0_0) + + AMS_SF_DEFINE_INTERFACE(ISslInterface, AMS_SPL_I_SSL_INTERFACE_INTERFACE_INFO) + +} diff --git a/libraries/libstratosphere/include/stratosphere/spl/smc/spl_smc.hpp b/libraries/libstratosphere/include/stratosphere/spl/smc/spl_smc.hpp index 3d83ac749..38a98d5df 100644 --- a/libraries/libstratosphere/include/stratosphere/spl/smc/spl_smc.hpp +++ b/libraries/libstratosphere/include/stratosphere/spl/smc/spl_smc.hpp @@ -20,54 +20,47 @@ namespace ams::spl::smc { /* Helpers for converting arguments. */ - inline u32 GetCryptAesMode(CipherMode mode, u32 keyslot) { + inline u32 GetComputeAesMode(CipherMode mode, u32 keyslot) { return static_cast((static_cast(mode) << 4) | (keyslot & 7)); } - inline u32 GetUnwrapEsKeyOption(EsKeyType type, u32 generation) { + inline u32 GetPrepareEsDeviceUniqueKeyOption(EsCommonKeyType type, u32 generation) { return static_cast((static_cast(type) << 6) | (generation & 0x3F)); } /* Functions. 
*/ - Result SetConfig(SplConfigItem which, const u64 *value, size_t num_qwords); - Result GetConfig(u64 *out, size_t num_qwords, SplConfigItem which); - Result CheckStatus(Result *out, AsyncOperationKey op); - Result GetResult(Result *out, void *out_buf, size_t out_buf_size, AsyncOperationKey op); - Result ExpMod(AsyncOperationKey *out_op, const void *base, const void *exp, size_t exp_size, const void *mod); + Result SetConfig(spl::ConfigItem which, const u64 *value, size_t num_qwords); + Result GetConfig(u64 *out, size_t num_qwords, spl::ConfigItem which); + Result GetResult(Result *out, AsyncOperationKey op); + Result GetResultData(Result *out, void *out_buf, size_t out_buf_size, AsyncOperationKey op); + Result ModularExponentiate(AsyncOperationKey *out_op, const void *base, const void *exp, size_t exp_size, const void *mod); Result GenerateRandomBytes(void *out, size_t size); Result GenerateAesKek(AccessKey *out, const KeySource &source, u32 generation, u32 option); Result LoadAesKey(u32 keyslot, const AccessKey &access_key, const KeySource &source); - Result CryptAes(AsyncOperationKey *out_op, u32 mode, const IvCtr &iv_ctr, u32 dst_addr, u32 src_addr, size_t size); + Result ComputeAes(AsyncOperationKey *out_op, u32 mode, const IvCtr &iv_ctr, u32 dst_addr, u32 src_addr, size_t size); Result GenerateSpecificAesKey(AesKey *out_key, const KeySource &source, u32 generation, u32 which); Result ComputeCmac(Cmac *out_mac, u32 keyslot, const void *data, size_t size); - Result ReEncryptRsaPrivateKey(void *data, size_t size, const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option); - Result DecryptOrImportRsaPrivateKey(void *data, size_t size, const AccessKey &access_key, const KeySource &source, DecryptOrImportMode mode); - Result SecureExpMod(AsyncOperationKey *out_op, const void *base, const void *mod, SecureExpModMode mode); - Result UnwrapTitleKey(AsyncOperationKey *out_op, const void *base, const void *mod, const void *label_digest, size_t label_digest_size, u32 option); - Result LoadTitleKey(u32 keyslot, const AccessKey &access_key); - Result UnwrapCommonTitleKey(AccessKey *out, const KeySource &source, u32 generation); + Result ReencryptDeviceUniqueData(void *data, size_t size, const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option); + Result DecryptDeviceUniqueData(void *data, size_t size, const AccessKey &access_key, const KeySource &source, DeviceUniqueDataMode mode); + Result ModularExponentiateWithStorageKey(AsyncOperationKey *out_op, const void *base, const void *mod, ModularExponentiateWithStorageKeyMode mode); + Result PrepareEsDeviceUniqueKey(AsyncOperationKey *out_op, const void *base, const void *mod, const void *label_digest, size_t label_digest_size, u32 option); + Result LoadPreparedAesKey(u32 keyslot, const AccessKey &access_key); + Result PrepareCommonEsTitleKey(AccessKey *out, const KeySource &source, u32 generation); /* Deprecated functions. 
*/ - Result ImportEsKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option); - Result DecryptRsaPrivateKey(size_t *out_size, void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option); - Result ImportSecureExpModKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option); + Result LoadEsDeviceKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option); + Result DecryptDeviceUniqueData(size_t *out_size, void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option); + Result DecryptAndStoreGcKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option); /* Atmosphere functions. */ Result AtmosphereCopyToIram(uintptr_t iram_dst, const void *dram_src, size_t size); Result AtmosphereCopyFromIram(void *dram_dst, uintptr_t iram_src, size_t size); Result AtmosphereReadWriteRegister(uint64_t address, uint32_t mask, uint32_t value, uint32_t *out_value); - Result AtmosphereWriteAddress(void *dst, const void *src, size_t size); Result AtmosphereGetEmummcConfig(void *out_config, void *out_paths, u32 storage_id); /* Helpers. */ - inline Result SetConfig(SplConfigItem which, const u64 value) { + inline Result SetConfig(spl::ConfigItem which, const u64 value) { return SetConfig(which, &value, 1); } - template - inline Result AtmosphereWriteAddress(void *dst, const T value) { - static_assert(std::is_integral::value && sizeof(T) <= 8 && (sizeof(T) & (sizeof(T) - 1)) == 0, "AtmosphereWriteAddress requires integral type."); - return AtmosphereWriteAddress(dst, &value, sizeof(T)); - } - } diff --git a/libraries/libstratosphere/include/stratosphere/spl/spl_types.hpp b/libraries/libstratosphere/include/stratosphere/spl/spl_types.hpp index b02bf1fd8..310fcebc9 100644 --- a/libraries/libstratosphere/include/stratosphere/spl/spl_types.hpp +++ b/libraries/libstratosphere/include/stratosphere/spl/spl_types.hpp @@ -22,35 +22,34 @@ namespace ams::spl { namespace smc { enum class FunctionId : u32 { - SetConfig = 0xC3000401, - GetConfig = 0xC3000002, - CheckStatus = 0xC3000003, - GetResult = 0xC3000404, - ExpMod = 0xC3000E05, - GenerateRandomBytes = 0xC3000006, - GenerateAesKek = 0xC3000007, - LoadAesKey = 0xC3000008, - CryptAes = 0xC3000009, - GenerateSpecificAesKey = 0xC300000A, - ComputeCmac = 0xC300040B, - ReEncryptRsaPrivateKey = 0xC300D60C, - DecryptOrImportRsaPrivateKey = 0xC300100D, + SetConfig = 0xC3000401, + GetConfig = 0xC3000002, + GetResult = 0xC3000003, + GetResultData = 0xC3000404, + ModularExponentiate = 0xC3000E05, + GenerateRandomBytes = 0xC3000006, + GenerateAesKek = 0xC3000007, + LoadAesKey = 0xC3000008, + ComputeAes = 0xC3000009, + GenerateSpecificAesKey = 0xC300000A, + ComputeCmac = 0xC300040B, + ReencryptDeviceUniqueData = 0xC300D60C, + DecryptDeviceUniqueData = 0xC300100D, - SecureExpMod = 0xC300060F, - UnwrapTitleKey = 0xC3000610, - LoadTitleKey = 0xC3000011, - UnwrapCommonTitleKey = 0xC3000012, + ModularExponentiateWithStorageKey = 0xC300060F, + PrepareEsDeviceUniqueKey = 0xC3000610, + LoadPreparedAesKey = 0xC3000011, + PrepareCommonEsTitleKey = 0xC3000012, /* Deprecated functions. */ - ImportEsKey = 0xC300100C, - DecryptRsaPrivateKey = 0xC300100D, - ImportSecureExpModKey = 0xC300100E, + LoadEsDeviceKey = 0xC300100C, + DecryptAndStoreGcKey = 0xC300100E, /* Atmosphere functions. 
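As a concrete illustration of the renamed argument-packing helpers above (GetComputeAesMode and GetPrepareEsDeviceUniqueKeyOption), the encodings work out as follows. The numeric values are worked examples, not code from this diff, and assume code written inside the ams namespace:

    /* (static_cast<u32>(CipherMode::Ctr) << 4) | (keyslot & 7): CTR mode on keyslot 2 -> 0x22. */
    const u32 aes_mode = spl::smc::GetComputeAesMode(spl::smc::CipherMode::Ctr, 2);

    /* (static_cast<u32>(EsCommonKeyType::ArchiveKey) << 6) | (generation & 0x3F): */
    /* ArchiveKey with key generation 5 -> 0x45. */
    const u32 es_option = spl::smc::GetPrepareEsDeviceUniqueKeyOption(spl::smc::EsCommonKeyType::ArchiveKey, 5);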
*/ - AtmosphereIramCopy = 0xF0000201, - AtmosphereReadWriteRegister = 0xF0000002, - AtmosphereWriteAddress = 0xF0000003, - AtmosphereGetEmummcConfig = 0xF0000404, + AtmosphereIramCopy = 0xF0000201, + AtmosphereReadWriteRegister = 0xF0000002, + + AtmosphereGetEmummcConfig = 0xF0000404, }; enum class Result { @@ -61,6 +60,7 @@ namespace ams::spl { NoAsyncOperation = 4, InvalidAsyncOperation = 5, NotPermitted = 6, + NotInitialized = 7, }; constexpr inline ::ams::Result ConvertResult(Result smc_result) { @@ -69,11 +69,10 @@ namespace ams::spl { /* Convert to the list of known SecureMonitorErrors. */ const auto converted = R_MAKE_NAMESPACE_RESULT(::ams::spl, static_cast(smc_result)); - if (spl::ResultSecureMonitorError::Includes(converted)) { - return converted; - } + R_UNLESS(spl::ResultSecureMonitorError::Includes(converted), spl::ResultUnknownSecureMonitorError()); - return spl::ResultUnknownSecureMonitorError(); + /* Return the error. */ + return converted; } enum class CipherMode { @@ -82,23 +81,23 @@ namespace ams::spl { Ctr = 2, }; - enum class DecryptOrImportMode { - DecryptRsaPrivateKey = 0, - ImportLotusKey = 1, - ImportEsKey = 2, - ImportSslKey = 3, - ImportDrmKey = 4, + enum class DeviceUniqueDataMode { + DecryptDeviceUniqueData = 0, + DecryptAndStoreGcKey = 1, + DecryptAndStoreEsDeviceKey = 2, + DecryptAndStoreSslKey = 3, + DecryptAndStoreDrmDeviceCertKey = 4, }; - enum class SecureExpModMode { - Lotus = 0, - Ssl = 1, - Drm = 2, + enum class ModularExponentiateWithStorageKeyMode { + Gc = 0, + Ssl = 1, + DrmDeviceCert = 2, }; - enum class EsKeyType { - TitleKey = 0, - ElicenseKey = 1, + enum class EsCommonKeyType { + TitleKey = 0, + ArchiveKey = 1, }; struct AsyncOperationKey { @@ -196,23 +195,23 @@ namespace ams::spl { enum class ConfigItem : u32 { /* Standard config items. */ - DisableProgramVerification = 1, - DramId = 2, - SecurityEngineIrqNumber = 3, - FuseVersion = 4, - HardwareType = 5, - HardwareState = 6, - IsRecoveryBoot = 7, - DeviceId = 8, - BootReason = 9, - MemoryMode = 10, - IsDevelopmentFunctionEnabled = 11, - KernelConfiguration = 12, - IsChargerHiZModeEnabled = 13, - IsQuest = 14, - RegulatorType = 15, - DeviceUniqueKeyGeneration = 16, - Package2Hash = 17, + DisableProgramVerification = 1, + DramId = 2, + SecurityEngineInterruptNumber = 3, + FuseVersion = 4, + HardwareType = 5, + HardwareState = 6, + IsRecoveryBoot = 7, + DeviceId = 8, + BootReason = 9, + MemoryMode = 10, + IsDevelopmentFunctionEnabled = 11, + KernelConfiguration = 12, + IsChargerHiZModeEnabled = 13, + QuestState = 14, + RegulatorType = 15, + DeviceUniqueKeyGeneration = 16, + Package2Hash = 17, /* Extension config items for exosphere. 
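For readers unfamiliar with the result macros: R_UNLESS(expr, res) returns res from the enclosing function when expr is false, so the rewritten ConvertResult above keeps the old behavior. Roughly equivalent control flow (a sketch, not code from this diff):

    if (!spl::ResultSecureMonitorError::Includes(converted)) {
        return spl::ResultUnknownSecureMonitorError();
    }
    return converted;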
*/ ExosphereApiVersion = 65000, @@ -222,6 +221,7 @@ namespace ams::spl { ExosphereHasRcmBugPatch = 65004, ExosphereBlankProdInfo = 65005, ExosphereAllowCalWrites = 65006, + ExosphereEmummcType = 65007, }; } @@ -234,3 +234,4 @@ constexpr inline SplConfigItem SplConfigItem_ExosphereGitCommitHash = static_ca constexpr inline SplConfigItem SplConfigItem_ExosphereHasRcmBugPatch = static_cast(65004); constexpr inline SplConfigItem SplConfigItem_ExosphereBlankProdInfo = static_cast(65005); constexpr inline SplConfigItem SplConfigItem_ExosphereAllowCalWrites = static_cast(65006); +constexpr inline SplConfigItem SplConfigItem_ExosphereEmummcType = static_cast(65007); diff --git a/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp b/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp index 56b7f8278..a98ba09e7 100644 --- a/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp +++ b/libraries/libstratosphere/include/stratosphere/svc/svc_stratosphere_shims.hpp @@ -110,8 +110,8 @@ namespace ams::svc::aarch64::lp64 { return ::svcResetSignal(handle); } - ALWAYS_INLINE Result WaitSynchronization(int32_t *out_index, ::ams::svc::UserPointer handles, int32_t numHandles, int64_t timeout_ns) { - return ::svcWaitSynchronization(out_index, handles.GetPointerUnsafe(), numHandles, timeout_ns); + ALWAYS_INLINE Result WaitSynchronization(int32_t *out_index, ::ams::svc::UserPointer handles, int32_t num_handles, int64_t timeout_ns) { + return ::svcWaitSynchronization(out_index, handles.GetPointerUnsafe(), num_handles, timeout_ns); } ALWAYS_INLINE Result CancelSynchronization(::ams::svc::Handle handle) { diff --git a/libraries/libstratosphere/source/ams/ams_emummc_api.cpp b/libraries/libstratosphere/source/ams/ams_emummc_api.cpp index f1c1c8880..585cac264 100644 --- a/libraries/libstratosphere/source/ams/ams_emummc_api.cpp +++ b/libraries/libstratosphere/source/ams/ams_emummc_api.cpp @@ -86,22 +86,17 @@ namespace ams::emummc { const Storage storage = static_cast(g_exo_config.base_cfg.type); g_is_emummc = g_exo_config.base_cfg.magic == StorageMagic && storage != Storage_Emmc; - /* Format paths. Ignore string format warnings. */ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wformat-truncation" - { - if (storage == Storage_SdFile) { - std::snprintf(g_exo_config.file_cfg.path, sizeof(g_exo_config.file_cfg.path), "/%s", paths->file_path); - } - - std::snprintf(g_exo_config.emu_dir_path, sizeof(g_exo_config.emu_dir_path), "/%s", paths->nintendo_path); - - /* If we're emummc, implement default nintendo redirection path. */ - if (g_is_emummc && std::strcmp(g_exo_config.emu_dir_path, "/") == 0) { - std::snprintf(g_exo_config.emu_dir_path, sizeof(g_exo_config.emu_dir_path), "/emummc/Nintendo_%04x", g_exo_config.base_cfg.id); - } + /* Format paths. */ + if (storage == Storage_SdFile) { + std::snprintf(g_exo_config.file_cfg.path, sizeof(g_exo_config.file_cfg.path), "/%s", paths->file_path); + } + + std::snprintf(g_exo_config.emu_dir_path, sizeof(g_exo_config.emu_dir_path), "/%s", paths->nintendo_path); + + /* If we're emummc, implement default nintendo redirection path. 
*/ + if (g_is_emummc && std::strcmp(g_exo_config.emu_dir_path, "/") == 0) { + std::snprintf(g_exo_config.emu_dir_path, sizeof(g_exo_config.emu_dir_path), "/emummc/Nintendo_%04x", g_exo_config.base_cfg.id); } - #pragma GCC diagnostic pop } g_has_cached = true; diff --git a/libraries/libstratosphere/source/ams/ams_environment.cpp b/libraries/libstratosphere/source/ams/ams_environment.cpp index 7679570a1..67e60b853 100644 --- a/libraries/libstratosphere/source/ams/ams_environment.cpp +++ b/libraries/libstratosphere/source/ams/ams_environment.cpp @@ -77,11 +77,26 @@ namespace ams { ams_ctx.afsr1 = ctx->afsr1; ams_ctx.far = ctx->far.x; ams_ctx.report_identifier = armGetSystemTick(); + + /* Detect stack overflow. */ + if (ams_ctx.error_desc == FatalErrorContext::DataAbortErrorDesc) { + svc::lp64::MemoryInfo mem_info; + svc::PageInfo page_info; + + if (/* Check if stack pointer is in guard page. */ + R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), ams_ctx.sp)) && + mem_info.state == svc::MemoryState_Free && + /* Check if stack pointer fell off stack. */ + R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), ams_ctx.sp + 0x1000)) && + mem_info.state == svc::MemoryState_Stack) { + ams_ctx.error_desc = FatalErrorContext::StackOverflowErrorDesc; + } + } /* Grab module base. */ { - MemoryInfo mem_info; - u32 page_info; - if (R_SUCCEEDED(svcQueryMemory(&mem_info, &page_info, GetPc()))) { + svc::lp64::MemoryInfo mem_info; + svc::PageInfo page_info; + if (R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), GetPc()))) { ams_ctx.module_base = mem_info.addr; } else { ams_ctx.module_base = 0; @@ -97,9 +112,9 @@ namespace ams { /* Read a new frame. */ StackFrame cur_frame; - MemoryInfo mem_info; - u32 page_info; - if (R_SUCCEEDED(svcQueryMemory(&mem_info, &page_info, cur_fp)) && (mem_info.perm & Perm_R) == Perm_R) { + svc::lp64::MemoryInfo mem_info; + svc::PageInfo page_info; + if (R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), cur_fp)) && (mem_info.perm & Perm_R) == Perm_R) { std::memcpy(&cur_frame, reinterpret_cast(cur_fp), sizeof(cur_frame)); } else { break; @@ -116,9 +131,9 @@ namespace ams { /* Grab up to 0x100 of stack. 
*/ { - MemoryInfo mem_info; - u32 page_info; - if (R_SUCCEEDED(svcQueryMemory(&mem_info, &page_info, ams_ctx.sp)) && (mem_info.perm & Perm_R) == Perm_R) { + svc::lp64::MemoryInfo mem_info; + svc::PageInfo page_info; + if (R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), ams_ctx.sp)) && (mem_info.perm & Perm_R) == Perm_R) { size_t copy_size = std::min(FatalErrorContext::MaxStackDumpSize, static_cast(mem_info.addr + mem_info.size - ams_ctx.sp)); ams_ctx.stack_dump_size = copy_size; std::memcpy(ams_ctx.stack_dump, reinterpret_cast(ams_ctx.sp), copy_size); diff --git a/libraries/libstratosphere/source/ams/ams_exosphere_api.cpp b/libraries/libstratosphere/source/ams/ams_exosphere_api.cpp index e964d3931..a8cf21b9a 100644 --- a/libraries/libstratosphere/source/ams/ams_exosphere_api.cpp +++ b/libraries/libstratosphere/source/ams/ams_exosphere_api.cpp @@ -21,7 +21,7 @@ namespace ams::exosphere { ApiInfo GetApiInfo() { u64 exosphere_cfg; - if (spl::smc::GetConfig(&exosphere_cfg, 1, SplConfigItem_ExosphereApiVersion) != spl::smc::Result::Success) { + if (spl::smc::GetConfig(&exosphere_cfg, 1, spl::ConfigItem::ExosphereApiVersion) != spl::smc::Result::Success) { R_ABORT_UNLESS(ResultNotPresent()); } @@ -29,15 +29,15 @@ namespace ams::exosphere { } void ForceRebootToRcm() { - R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::SetConfig(SplConfigItem_ExosphereNeedsReboot, 1))); + R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::SetConfig(spl::ConfigItem::ExosphereNeedsReboot, 1))); } void ForceRebootToIramPayload() { - R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::SetConfig(SplConfigItem_ExosphereNeedsReboot, 2))); + R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::SetConfig(spl::ConfigItem::ExosphereNeedsReboot, 2))); } void ForceShutdown() { - R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::SetConfig(SplConfigItem_ExosphereNeedsShutdown, 1))); + R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::SetConfig(spl::ConfigItem::ExosphereNeedsShutdown, 1))); } void CopyToIram(uintptr_t iram_dst, const void *dram_src, size_t size) { @@ -52,7 +52,7 @@ namespace ams::exosphere { inline u64 GetU64ConfigItem(spl::ConfigItem cfg) { u64 tmp; - R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::GetConfig(std::addressof(tmp), 1, static_cast<::SplConfigItem>(cfg)))); + R_ABORT_UNLESS(spl::smc::ConvertResult(spl::smc::GetConfig(std::addressof(tmp), 1, cfg))); return tmp; } diff --git a/libraries/libstratosphere/source/boot2/boot2_api.cpp b/libraries/libstratosphere/source/boot2/boot2_api.cpp index b18b56529..c2c050f39 100644 --- a/libraries/libstratosphere/source/boot2/boot2_api.cpp +++ b/libraries/libstratosphere/source/boot2/boot2_api.cpp @@ -183,14 +183,16 @@ namespace ams::boot2 { return R_SUCCEEDED(gpioPadGetValue(&button, &val)) && val == GpioValue_Low; } + bool IsForceMaintenance() { + u8 force_maintenance = 1; + settings::fwdbg::GetSettingsItemValue(&force_maintenance, sizeof(force_maintenance), "boot", "force_maintenance"); + return force_maintenance != 0; + } + bool IsMaintenanceMode() { /* Contact set:sys, retrieve boot!force_maintenance. */ - { - u8 force_maintenance = 1; - settings::fwdbg::GetSettingsItemValue(&force_maintenance, sizeof(force_maintenance), "boot", "force_maintenance"); - if (force_maintenance != 0) { - return true; - } + if (IsForceMaintenance()) { + return true; } /* Contact GPIO, read plus/minus buttons. 
*/ @@ -313,7 +315,38 @@ namespace ams::boot2 { R_ABORT_UNLESS(sm::mitm::WaitMitm(sm::ServiceName::Encode("fsp-srv"))); /* Launch programs required to mount the SD card. */ - LaunchList(PreSdCardLaunchPrograms, NumPreSdCardLaunchPrograms); + /* psc, bus, pcv (and usb on newer firmwares) is the minimal set of required programs. */ + /* bus depends on pcie, and pcv depends on settings. */ + { + /* Launch psc. */ + LaunchProgram(nullptr, ncm::ProgramLocation::Make(ncm::SystemProgramId::Psc, ncm::StorageId::BuiltInSystem), 0); + + /* Launch pcie. */ + LaunchProgram(nullptr, ncm::ProgramLocation::Make(ncm::SystemProgramId::Pcie, ncm::StorageId::BuiltInSystem), 0); + + /* Launch bus. */ + LaunchProgram(nullptr, ncm::ProgramLocation::Make(ncm::SystemProgramId::Bus, ncm::StorageId::BuiltInSystem), 0); + + /* Launch settings. */ + LaunchProgram(nullptr, ncm::ProgramLocation::Make(ncm::SystemProgramId::Settings, ncm::StorageId::BuiltInSystem), 0); + + /* NOTE: Here we work around a race condition in the boot process by ensuring that settings initializes its db. */ + { + /* Connect to set:sys. */ + sm::ScopedServiceHolder<::setsysInitialize, ::setsysExit> setsys_holder; + AMS_ABORT_UNLESS(setsys_holder); + + /* Retrieve setting from the database. */ + u8 force_maintenance = 0; + settings::fwdbg::GetSettingsItemValue(&force_maintenance, sizeof(force_maintenance), "boot", "force_maintenance"); + } + + /* Launch pcv. */ + LaunchProgram(nullptr, ncm::ProgramLocation::Make(ncm::SystemProgramId::Pcv, ncm::StorageId::BuiltInSystem), 0); + + /* Launch usb. */ + LaunchProgram(nullptr, ncm::ProgramLocation::Make(ncm::SystemProgramId::Usb, ncm::StorageId::BuiltInSystem), 0); + } /* Wait for the SD card required services to be ready. */ cfg::WaitSdCardRequiredServicesReady(); diff --git a/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.cpp b/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.cpp index 42553cfcb..b5e9a6232 100644 --- a/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.cpp +++ b/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.cpp @@ -24,7 +24,7 @@ namespace ams::capsrv::server { this->server_manager_holder.emplace(); /* Register the service. */ - R_ABORT_UNLESS(this->server_manager_holder->RegisterServer(ServiceName, MaxSessions, sf::ServiceObjectTraits::SharedPointerHelper::GetEmptyDeleteSharedPointer(std::addressof(*this->service_holder)))); + R_ABORT_UNLESS((this->server_manager_holder->RegisterServer(ServiceName, MaxSessions, sf::GetSharedPointerTo(*this->service_holder)))); /* Initialize the idle event, we're idle initially. 
*/ os::InitializeEvent(std::addressof(this->idle_event), true, os::EventClearMode_ManualClear); diff --git a/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.hpp b/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.hpp index d8db8697f..3d7fdbf17 100644 --- a/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.hpp +++ b/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_server_manager.hpp @@ -26,6 +26,7 @@ namespace ams::capsrv::server { static constexpr inline size_t MaxSessions = 2; static constexpr inline sm::ServiceName ServiceName = sm::ServiceName::Encode("caps:dc"); + using Interface = IDecoderControlService; using Service = DecoderControlService; using ServerOptions = sf::hipc::DefaultServerManagerOptions; using ServerManager = sf::hipc::ServerManager; diff --git a/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_service.hpp b/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_service.hpp index 0a095883b..961018cc1 100644 --- a/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_service.hpp +++ b/libraries/libstratosphere/source/capsrv/server/decodersrv/decodersrv_decoder_control_service.hpp @@ -18,17 +18,15 @@ namespace ams::capsrv::server { - class DecoderControlService final : public sf::IServiceObject { - protected: - enum class CommandId { - DecodeJpeg = 3001, - }; + #define AMS_CAPSRV_DECODER_CONTROL_SERVICE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 3001, Result, DecodeJpeg, (const sf::OutNonSecureBuffer &out, const sf::InBuffer &in, u32 width, u32 height, const ScreenShotDecodeOption &option)) + + AMS_SF_DEFINE_INTERFACE(IDecoderControlService, AMS_CAPSRV_DECODER_CONTROL_SERVICE_INTERFACE_INFO) + + class DecoderControlService final { public: - /* Actual commands. 
*/ - virtual Result DecodeJpeg(const sf::OutNonSecureBuffer &out, const sf::InBuffer &in, u32 width, u32 height, const ScreenShotDecodeOption &option); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(DecodeJpeg) - }; + Result DecodeJpeg(const sf::OutNonSecureBuffer &out, const sf::InBuffer &in, u32 width, u32 height, const ScreenShotDecodeOption &option); }; + static_assert(IsIDecoderControlService); + } \ No newline at end of file diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_attachment_impl.hpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_attachment_impl.hpp index 8d0e65670..05aebe943 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_attachment_impl.hpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_attachment_impl.hpp @@ -20,19 +20,20 @@ namespace ams::erpt::srv { class Attachment; - class AttachmentImpl final : public erpt::sf::IAttachment { + class AttachmentImpl final { private: Attachment *attachment; public: AttachmentImpl(); ~AttachmentImpl(); public: - virtual Result Open(const AttachmentId &attachment_id) override final; - virtual Result Read(ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer) override final; - virtual Result SetFlags(AttachmentFlagSet flags) override final; - virtual Result GetFlags(ams::sf::Out out) override final; - virtual Result Close() override final; - virtual Result GetSize(ams::sf::Out out) override final; + Result Open(const AttachmentId &attachment_id); + Result Read(ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer); + Result SetFlags(AttachmentFlagSet flags); + Result GetFlags(ams::sf::Out out); + Result Close(); + Result GetSize(ams::sf::Out out); }; + static_assert(erpt::sf::IsIAttachment); } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_context_impl.hpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_context_impl.hpp index 662f02022..f2660638a 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_context_impl.hpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_context_impl.hpp @@ -18,19 +18,20 @@ namespace ams::erpt::srv { - class ContextImpl final : public erpt::sf::IContext { + class ContextImpl final { public: - virtual Result SubmitContext(const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer) override final; - virtual Result CreateReport(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer) override final; - virtual Result SetInitialLaunchSettingsCompletionTime(const time::SteadyClockTimePoint &time_point) override final; - virtual Result ClearInitialLaunchSettingsCompletionTime() override final; - virtual Result UpdatePowerOnTime() override final; - virtual Result UpdateAwakeTime() override final; - virtual Result SubmitMultipleCategoryContext(const MultipleCategoryContextEntry &ctx_entry, const ams::sf::InBuffer &str_buffer) override final; - virtual Result UpdateApplicationLaunchTime() override final; - virtual Result ClearApplicationLaunchTime() override final; - virtual Result SubmitAttachment(ams::sf::Out out, const ams::sf::InBuffer &attachment_name, const ams::sf::InBuffer &attachment_data) override final; - virtual Result CreateReportWithAttachments(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer) override final; + Result SubmitContext(const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer 
&data_buffer); + Result CreateReport(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer); + Result SetInitialLaunchSettingsCompletionTime(const time::SteadyClockTimePoint &time_point); + Result ClearInitialLaunchSettingsCompletionTime(); + Result UpdatePowerOnTime(); + Result UpdateAwakeTime(); + Result SubmitMultipleCategoryContext(const MultipleCategoryContextEntry &ctx_entry, const ams::sf::InBuffer &str_buffer); + Result UpdateApplicationLaunchTime(); + Result ClearApplicationLaunchTime(); + Result SubmitAttachment(ams::sf::Out out, const ams::sf::InBuffer &attachment_name, const ams::sf::InBuffer &attachment_data); + Result CreateReportWithAttachments(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer); }; + static_assert(erpt::sf::IsIContext); } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_journal_for_reports.cpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_journal_for_reports.cpp index 5db553eb2..f32c4f10f 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_journal_for_reports.cpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_journal_for_reports.cpp @@ -98,7 +98,7 @@ namespace ams::erpt::srv { } s64 JournalForReports::GetMaxReportSize() { - s64 max_size; + s64 max_size = 0; for (auto it = s_record_list.begin(); it != s_record_list.end(); it++) { max_size = std::max(max_size, it->info.report_size); } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_manager_impl.hpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_manager_impl.hpp index b75d01955..e59dddb51 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_manager_impl.hpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_manager_impl.hpp @@ -18,7 +18,7 @@ namespace ams::erpt::srv { - class ManagerImpl final : public erpt::sf::IManager, public util::IntrusiveListBaseNode { + class ManagerImpl final : public util::IntrusiveListBaseNode { private: os::SystemEvent system_event; public: @@ -29,12 +29,13 @@ namespace ams::erpt::srv { public: static Result NotifyAll(); public: - virtual Result GetReportList(const ams::sf::OutBuffer &out_list, ReportType type_filter) override final; - virtual Result GetEvent(ams::sf::OutCopyHandle out) override final; - virtual Result CleanupReports() override final; - virtual Result DeleteReport(const ReportId &report_id) override final; - virtual Result GetStorageUsageStatistics(ams::sf::Out out) override final; - virtual Result GetAttachmentList(const ams::sf::OutBuffer &out_buf, const ReportId &report_id) override final; + Result GetReportList(const ams::sf::OutBuffer &out_list, ReportType type_filter); + Result GetEvent(ams::sf::OutCopyHandle out); + Result CleanupReports(); + Result DeleteReport(const ReportId &report_id); + Result GetStorageUsageStatistics(ams::sf::Out out); + Result GetAttachmentList(const ams::sf::OutBuffer &out_buf, const ReportId &report_id); }; + static_assert(erpt::sf::IsIManager); } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_report_impl.hpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_report_impl.hpp index 8b60e536a..c1adc2529 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_report_impl.hpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_report_impl.hpp @@ -20,19 +20,20 @@ namespace ams::erpt::srv { class Report; - class ReportImpl final : public 
erpt::sf::IReport { + class ReportImpl final { private: Report *report; public: ReportImpl(); ~ReportImpl(); public: - virtual Result Open(const ReportId &report_id) override final; - virtual Result Read(ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer) override final; - virtual Result SetFlags(ReportFlagSet flags) override final; - virtual Result GetFlags(ams::sf::Out out) override final; - virtual Result Close() override final; - virtual Result GetSize(ams::sf::Out out) override final; + Result Open(const ReportId &report_id); + Result Read(ams::sf::Out out_count, const ams::sf::OutBuffer &out_buffer); + Result SetFlags(ReportFlagSet flags); + Result GetFlags(ams::sf::Out out); + Result Close(); + Result GetSize(ams::sf::Out out); }; + static_assert(erpt::sf::IsIReport); } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_service.cpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_service.cpp index b6fd8725e..293a5358e 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_service.cpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_service.cpp @@ -42,7 +42,7 @@ namespace ams::erpt::srv { class ErrorReportServiceManager : public ams::sf::hipc::ServerManager { private: os::ThreadType thread; - std::shared_ptr context_session_object; + std::shared_ptr context_session_object; private: static void ThreadFunction(void *_this) { reinterpret_cast(_this)->SetupAndLoopProcess(); @@ -51,14 +51,14 @@ namespace ams::erpt::srv { void SetupAndLoopProcess(); public: ErrorReportServiceManager(erpt::srv::ContextImpl *c) - : context_session_object(ams::sf::ServiceObjectTraits::SharedPointerHelper::GetEmptyDeleteSharedPointer(c)) + : context_session_object(ams::sf::GetSharedPointerTo(c)) { /* ... */ } Result Initialize() { - R_ABORT_UNLESS(this->RegisterServer(ErrorReportContextServiceName, ErrorReportContextSessions, this->context_session_object)); - R_ABORT_UNLESS(this->RegisterServer(ErrorReportReportServiceName, ErrorReportReportSessions)); + R_ABORT_UNLESS((this->RegisterServer(ErrorReportContextServiceName, ErrorReportContextSessions, this->context_session_object))); + R_ABORT_UNLESS((this->RegisterServer(ErrorReportReportServiceName, ErrorReportReportSessions))); this->ResumeProcessing(); @@ -117,7 +117,7 @@ namespace ams::erpt::srv { } } - erpt::srv::ContextImpl g_context_object; + constinit erpt::srv::ContextImpl g_context_object; ErrorReportServiceManager g_erpt_server_manager(std::addressof(g_context_object)); } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.cpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.cpp index 3a0eda8b0..4aec973b6 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.cpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.cpp @@ -21,34 +21,34 @@ namespace ams::erpt::srv { - Result SessionImpl::OpenReport(ams::sf::Out> out) { - /* Create an interface. */ - auto intf = std::shared_ptr(new (std::nothrow) ReportImpl); - R_UNLESS(intf != nullptr, erpt::ResultOutOfMemory()); + namespace { - /* Return it. */ - out.SetValue(std::move(intf)); - return ResultSuccess(); + template + ALWAYS_INLINE Result OpenInterface(ams::sf::Out> &out) { + /* Define holder type. */ + using Holder = typename Interface::ImplHolder; + + /* Create an interface holder. */ + auto intf = std::shared_ptr(new (std::nothrow) Holder); + R_UNLESS(intf != nullptr, erpt::ResultOutOfMemory()); + + /* Return it. 
*/ + out.SetValue(std::move(intf)); + return ResultSuccess(); + } + + } + + Result SessionImpl::OpenReport(ams::sf::Out> out) { + return OpenInterface(out); } Result SessionImpl::OpenManager(ams::sf::Out> out) { - /* Create an interface. */ - auto intf = std::shared_ptr(new (std::nothrow) ManagerImpl); - R_UNLESS(intf != nullptr, erpt::ResultOutOfMemory()); - - /* Return it. */ - out.SetValue(std::move(intf)); - return ResultSuccess(); + return OpenInterface(out); } Result SessionImpl::OpenAttachment(ams::sf::Out> out) { - /* Create an interface. */ - auto intf = std::shared_ptr(new (std::nothrow) AttachmentImpl); - R_UNLESS(intf != nullptr, erpt::ResultOutOfMemory()); - - /* Return it. */ - out.SetValue(std::move(intf)); - return ResultSuccess(); + return OpenInterface(out); } } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.hpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.hpp index 4dcc165b0..8d68c701d 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.hpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_session_impl.hpp @@ -18,11 +18,12 @@ namespace ams::erpt::srv { - class SessionImpl final : public erpt::sf::ISession { + class SessionImpl final { public: - virtual Result OpenReport(ams::sf::Out> out) override final; - virtual Result OpenManager(ams::sf::Out> out) override final; - virtual Result OpenAttachment(ams::sf::Out> out) override final; + Result OpenReport(ams::sf::Out> out); + Result OpenManager(ams::sf::Out> out); + Result OpenAttachment(ams::sf::Out> out); }; + static_assert(erpt::sf::IsISession); } diff --git a/libraries/libstratosphere/source/erpt/srv/erpt_srv_stream.cpp b/libraries/libstratosphere/source/erpt/srv/erpt_srv_stream.cpp index e67f0b623..83c6879d5 100644 --- a/libraries/libstratosphere/source/erpt/srv/erpt_srv_stream.cpp +++ b/libraries/libstratosphere/source/erpt/srv/erpt_srv_stream.cpp @@ -75,6 +75,7 @@ namespace ams::erpt::srv { auto file_guard = SCOPE_GUARD { if (mode == StreamMode_Write) { fs::CloseFile(this->file_handle); } }; std::strncpy(this->file_name, path, sizeof(this->file_name)); + this->file_name[sizeof(this->file_name) - 1] = '\x00'; this->buffer = reinterpret_cast(Allocate(buffer_size)); R_UNLESS(this->buffer != nullptr, erpt::ResultOutOfMemory()); diff --git a/libraries/libstratosphere/source/fs/fs_access_log.cpp b/libraries/libstratosphere/source/fs/fs_access_log.cpp new file mode 100644 index 000000000..7f06278a6 --- /dev/null +++ b/libraries/libstratosphere/source/fs/fs_access_log.cpp @@ -0,0 +1,535 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "fsa/fs_user_mount_table.hpp" +#include "fsa/fs_directory_accessor.hpp" +#include "fsa/fs_file_accessor.hpp" +#include "fsa/fs_filesystem_accessor.hpp" + +#define AMS_FS_IMPL_ACCESS_LOG_AMS_API_VERSION "ams_version: " STRINGIZE(ATMOSPHERE_RELEASE_VERSION_MAJOR) "." STRINGIZE(ATMOSPHERE_RELEASE_VERSION_MINOR) "." 
STRINGIZE(ATMOSPHERE_RELEASE_VERSION_MICRO) + +/* TODO: Other boards? */ +#define AMS_FS_IMPL_ACCESS_LOG_SPEC "spec: NX" + +namespace ams::fs { + + /* Forward declare priority getter. */ + fs::PriorityRaw GetPriorityRawOnCurrentThreadInternal(); + + namespace { + + constinit u32 g_global_access_log_mode = fs::AccessLogMode_None; + constinit u32 g_local_access_log_target = fs::impl::AccessLogTarget_None; + + constinit std::atomic_bool g_access_log_initialized = false; + constinit os::SdkMutex g_access_log_initialization_mutex; + + void SetLocalAccessLogImpl(bool enabled) { + if (enabled) { + g_local_access_log_target |= fs::impl::AccessLogTarget_Application; + } else { + g_local_access_log_target &= ~fs::impl::AccessLogTarget_Application; + } + } + + } + + Result GetGlobalAccessLogMode(u32 *out) { + /* Use libnx bindings. */ + return ::fsGetGlobalAccessLogMode(out); + } + + Result SetGlobalAccessLogMode(u32 mode) { + /* Use libnx bindings. */ + return ::fsSetGlobalAccessLogMode(mode); + } + + void SetLocalAccessLog(bool enabled) { + SetLocalAccessLogImpl(enabled); + } + + void SetLocalApplicationAccessLog(bool enabled) { + SetLocalAccessLogImpl(enabled); + } + + void SetLocalSystemAccessLogForDebug(bool enabled) { + #if defined(AMS_BUILD_FOR_DEBUGGING) + if (enabled) { + g_local_access_log_target |= (fs::impl::AccessLogTarget_Application | fs::impl::AccessLogTarget_System); + } else { + g_local_access_log_target &= ~(fs::impl::AccessLogTarget_Application | fs::impl::AccessLogTarget_System); + } + #endif + } + +} + +namespace ams::fs::impl { + + const char *IdString::ToValueString(int id) { + const int len = std::snprintf(this->buffer, sizeof(this->buffer), "%d", id); + AMS_ASSERT(static_cast(len) < sizeof(this->buffer)); + return this->buffer; + } + + template<> const char *IdString::ToString(fs::Priority id) { + switch (id) { + case fs::Priority_Realtime: return "Realtime"; + case fs::Priority_Normal: return "Normal"; + case fs::Priority_Low: return "Low"; + default: return ToValueString(static_cast(id)); + } + } + + template<> const char *IdString::ToString(fs::PriorityRaw id) { + switch (id) { + case fs::PriorityRaw_Realtime: return "Realtime"; + case fs::PriorityRaw_Normal: return "Normal"; + case fs::PriorityRaw_Low: return "Low"; + case fs::PriorityRaw_Background: return "Realtime"; + default: return ToValueString(static_cast(id)); + } + } + + template<> const char *IdString::ToString(fs::ContentStorageId id) { + switch (id) { + case fs::ContentStorageId::User: return "User"; + case fs::ContentStorageId::System: return "System"; + case fs::ContentStorageId::SdCard: return "SdCard"; + default: return ToValueString(static_cast(id)); + } + } + + template<> const char *IdString::ToString(fs::SaveDataSpaceId id) { + switch (id) { + case fs::SaveDataSpaceId::System: return "System"; + case fs::SaveDataSpaceId::User: return "User"; + case fs::SaveDataSpaceId::SdSystem: return "SdSystem"; + case fs::SaveDataSpaceId::ProperSystem: return "ProperSystem"; + default: return ToValueString(static_cast(id)); + } + } + + template<> const char *IdString::ToString(fs::ContentType id) { + switch (id) { + case fs::ContentType_Meta: return "Meta"; + case fs::ContentType_Control: return "Control"; + case fs::ContentType_Manual: return "Manual"; + case fs::ContentType_Logo: return "Logo"; + case fs::ContentType_Data: return "Data"; + default: return ToValueString(static_cast(id)); + } + } + + template<> const char *IdString::ToString(fs::BisPartitionId id) { + switch (id) { + case 
fs::BisPartitionId::BootPartition1Root: return "BootPartition1Root"; + case fs::BisPartitionId::BootPartition2Root: return "BootPartition2Root"; + case fs::BisPartitionId::UserDataRoot: return "UserDataRoot"; + case fs::BisPartitionId::BootConfigAndPackage2Part1: return "BootConfigAndPackage2Part1"; + case fs::BisPartitionId::BootConfigAndPackage2Part2: return "BootConfigAndPackage2Part2"; + case fs::BisPartitionId::BootConfigAndPackage2Part3: return "BootConfigAndPackage2Part3"; + case fs::BisPartitionId::BootConfigAndPackage2Part4: return "BootConfigAndPackage2Part4"; + case fs::BisPartitionId::BootConfigAndPackage2Part5: return "BootConfigAndPackage2Part5"; + case fs::BisPartitionId::BootConfigAndPackage2Part6: return "BootConfigAndPackage2Part6"; + case fs::BisPartitionId::CalibrationBinary: return "CalibrationBinary"; + case fs::BisPartitionId::CalibrationFile: return "CalibrationFile"; + case fs::BisPartitionId::SafeMode: return "SafeMode"; + case fs::BisPartitionId::User: return "User"; + case fs::BisPartitionId::System: return "System"; + case fs::BisPartitionId::SystemProperEncryption: return "SystemProperEncryption"; + case fs::BisPartitionId::SystemProperPartition: return "SystemProperPartition"; + default: return ToValueString(static_cast(id)); + } + } + + namespace { + + class AccessLogPrinterCallbackManager { + private: + AccessLogPrinterCallback callback; + public: + constexpr AccessLogPrinterCallbackManager() : callback(nullptr) { /* ... */ } + + constexpr bool IsRegisteredCallback() const { return this->callback != nullptr; } + + constexpr void RegisterCallback(AccessLogPrinterCallback c) { + AMS_ASSERT(this->callback == nullptr); + this->callback = c; + } + + constexpr int InvokeCallback(char *buf, size_t size) const { + AMS_ASSERT(this->callback != nullptr); + return this->callback(buf, size); + } + }; + + constinit AccessLogPrinterCallbackManager g_access_log_manager_printer_callback_manager; + + ALWAYS_INLINE AccessLogPrinterCallbackManager &GetStartAccessLogPrinterCallbackManager() { + return g_access_log_manager_printer_callback_manager; + } + + const char *GetPriorityRawName(fs::impl::IdString &id_string) { + return id_string.ToString(fs::GetPriorityRawOnCurrentThreadInternal()); + } + + Result OutputAccessLogToSdCardImpl(const char *log, size_t size) { + /* Use libnx bindings. */ + return ::fsOutputAccessLogToSdCard(log, size); + } + + void OutputAccessLogToSdCard(const char *format, std::va_list vl) { + if ((g_global_access_log_mode & AccessLogMode_SdCard) != 0) { + /* Create a buffer to hold the log's input string. */ + int log_buffer_size = 1_KB; + auto log_buffer = fs::impl::MakeUnique(log_buffer_size); + while (true) { + if (log_buffer == nullptr) { + return; + } + + const auto size = std::vsnprintf(log_buffer.get(), log_buffer_size, format, vl); + if (size < log_buffer_size) { + break; + } + + log_buffer_size = size + 1; + log_buffer = fs::impl::MakeUnique(log_buffer_size); + } + + /* Output. */ + OutputAccessLogToSdCardImpl(log_buffer.get(), log_buffer_size - 1); + } + } + + void OutputAccessLogImpl(const char *log, size_t size) { + if ((g_global_access_log_mode & AccessLogMode_Log) != 0) { + /* TODO: Support logging. 
*/ + } else if ((g_global_access_log_mode & AccessLogMode_SdCard) != 0) { + OutputAccessLogToSdCardImpl(log, size - 1); + } + } + + void OutputAccessLog(Result result, const char *priority, os::Tick start, os::Tick end, const char *name, const void *handle, const char *format, std::va_list vl) { + /* Create a buffer to hold the log's input string. */ + int str_buffer_size = 1_KB; + auto str_buffer = fs::impl::MakeUnique(str_buffer_size); + while (true) { + if (str_buffer == nullptr) { + return; + } + + const auto size = std::vsnprintf(str_buffer.get(), str_buffer_size, format, vl); + if (size < str_buffer_size) { + break; + } + + str_buffer_size = size + 1; + str_buffer = fs::impl::MakeUnique(str_buffer_size); + } + + /* Create a buffer to hold the log. */ + int log_buffer_size = 0; + decltype(str_buffer) log_buffer; + { + /* Declare format string. */ + constexpr const char FormatString[] = "FS_ACCESS { " + "start: %9" PRId64 ", " + "end: %9" PRId64 ", " + "result: 0x%08" PRIX32 ", " + "handle: 0x%p, " + "priority: %s, " + "function: \"%s\"" + "%s" + " }\n"; + + /* Convert the timing to ms. */ + const s64 start_ms = start.ToTimeSpan().GetMilliSeconds(); + const s64 end_ms = end.ToTimeSpan().GetMilliSeconds(); + + /* Print the log. */ + int try_size = std::max(str_buffer_size + sizeof(FormatString) + 0x100, 1_KB); + while (true) { + log_buffer = fs::impl::MakeUnique(try_size); + if (log_buffer == nullptr) { + return; + } + + log_buffer_size = 1 + std::snprintf(log_buffer.get(), try_size, FormatString, start_ms, end_ms, result.GetValue(), handle, priority, name, str_buffer.get()); + if (log_buffer_size <= try_size) { + break; + } + try_size = log_buffer_size; + } + } + + OutputAccessLogImpl(log_buffer.get(), log_buffer_size); + } + + void GetProgramIndexFortAccessLog(u32 *out_index, u32 *out_count) { + if (hos::GetVersion() >= hos::Version_7_0_0) { + /* Use libnx bindings if available. */ + R_ABORT_UNLESS(::fsGetProgramIndexForAccessLog(out_index, out_count)); + } else { + /* Use hardcoded defaults. */ + *out_index = 0; + *out_count = 0; + } + } + + void OutputAccessLogStart() { + /* Get the program index. */ + u32 program_index = 0, program_count = 0; + GetProgramIndexFortAccessLog(std::addressof(program_index), std::addressof(program_count)); + + /* Print the log buffer. */ + if (program_count < 2) { + constexpr const char StartLog[] = "FS_ACCESS: { " + AMS_FS_IMPL_ACCESS_LOG_AMS_API_VERSION ", " + AMS_FS_IMPL_ACCESS_LOG_SPEC + " }\n"; + + OutputAccessLogImpl(StartLog, sizeof(StartLog)); + } else { + constexpr const char StartLog[] = "FS_ACCESS: { " + AMS_FS_IMPL_ACCESS_LOG_AMS_API_VERSION ", " + AMS_FS_IMPL_ACCESS_LOG_SPEC ", " + "program_index: %d" + " }\n"; + + char log_buffer[0x80]; + const int len = 1 + std::snprintf(log_buffer, sizeof(log_buffer), StartLog, static_cast(program_index)); + if (static_cast(len) <= sizeof(log_buffer)) { + OutputAccessLogImpl(log_buffer, len); + } + } + } + + [[maybe_unused]] void OutputAccessLogStartForSystem() { + constexpr const char StartLog[] = "FS_ACCESS: { " + AMS_FS_IMPL_ACCESS_LOG_AMS_API_VERSION ", " + AMS_FS_IMPL_ACCESS_LOG_SPEC ", " + "for_system: true" + " }\n"; + OutputAccessLogImpl(StartLog, sizeof(StartLog)); + } + + void OutputAccessLogStartGeneratedByCallback() { + /* Get the manager. */ + const auto &manager = GetStartAccessLogPrinterCallbackManager(); + if (manager.IsRegisteredCallback()) { + /* Invoke the callback. 
*/ + char log_buffer[0x80]; + const int len = 1 + manager.InvokeCallback(log_buffer, sizeof(log_buffer)); + + /* Print, if we fit. */ + if (static_cast(len) <= sizeof(log_buffer)) { + OutputAccessLogImpl(log_buffer, len); + } + } + } + + } + + bool IsEnabledAccessLog(u32 target) { + /* If we don't need to log to the target, return false. */ + if ((g_local_access_log_target & target) == 0) { + return false; + } + + /* Ensure we've initialized. */ + if (!g_access_log_initialized) { + std::scoped_lock lk(g_access_log_initialization_mutex); + if (!g_access_log_initialized) { + + #if defined (AMS_BUILD_FOR_DEBUGGING) + if ((g_local_access_log_target & fs::impl::AccessLogTarget_System) != 0) + { + g_global_access_log_mode = AccessLogMode_Log; + OutputAccessLogStartForSystem(); + OutputAccessLogStartGeneratedByCallback(); + } + else + #endif + { + AMS_FS_R_ABORT_UNLESS(GetGlobalAccessLogMode(std::addressof(g_global_access_log_mode))); + if (g_global_access_log_mode != AccessLogMode_None) { + OutputAccessLogStart(); + OutputAccessLogStartGeneratedByCallback(); + } + } + + g_access_log_initialized = true; + } + } + + return g_global_access_log_mode != AccessLogMode_None; + } + + bool IsEnabledAccessLog() { + return IsEnabledAccessLog(fs::impl::AccessLogTarget_Application | fs::impl::AccessLogTarget_System); + } + + void RegisterStartAccessLogPrinterCallback(AccessLogPrinterCallback callback) { + GetStartAccessLogPrinterCallbackManager().RegisterCallback(callback); + } + + void OutputAccessLog(Result result, fs::Priority priority, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...) { + std::va_list vl; + va_start(vl, fmt); + OutputAccessLog(result, fs::impl::IdString().ToString(priority), start, end, name, handle, fmt, vl); + va_end(vl); + } + + void OutputAccessLog(Result result, fs::PriorityRaw priority_raw, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...){ + std::va_list vl; + va_start(vl, fmt); + OutputAccessLog(result, fs::impl::IdString().ToString(priority_raw), start, end, name, handle, fmt, vl); + va_end(vl); + } + + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, fs::FileHandle handle, const char *fmt, ...) { + std::va_list vl; + va_start(vl, fmt); + fs::impl::IdString id_string; + OutputAccessLog(result, GetPriorityRawName(id_string), start, end, name, handle.handle, fmt, vl); + va_end(vl); + } + + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, fs::DirectoryHandle handle, const char *fmt, ...) { + std::va_list vl; + va_start(vl, fmt); + fs::impl::IdString id_string; + OutputAccessLog(result, GetPriorityRawName(id_string), start, end, name, handle.handle, fmt, vl); + va_end(vl); + } + + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, fs::impl::IdentifyAccessLogHandle handle, const char *fmt, ...) { + std::va_list vl; + va_start(vl, fmt); + fs::impl::IdString id_string; + OutputAccessLog(result, GetPriorityRawName(id_string), start, end, name, handle.handle, fmt, vl); + va_end(vl); + } + + void OutputAccessLog(Result result, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...) { + std::va_list vl; + va_start(vl, fmt); + fs::impl::IdString id_string; + OutputAccessLog(result, GetPriorityRawName(id_string), start, end, name, handle, fmt, vl); + va_end(vl); + } + + void OutputAccessLogToOnlySdCard(const char *fmt, ...) 
{ + std::va_list vl; + va_start(vl, fmt); + OutputAccessLogToSdCard(fmt, vl); + va_end(vl); + } + + void OutputAccessLogUnlessResultSuccess(Result result, os::Tick start, os::Tick end, const char *name, fs::FileHandle handle, const char *fmt, ...) { + if (R_FAILED(result)) { + std::va_list vl; + va_start(vl, fmt); + fs::impl::IdString id_string; + OutputAccessLog(result, GetPriorityRawName(id_string), start, end, name, handle.handle, fmt, vl); + va_end(vl); + } + } + + void OutputAccessLogUnlessResultSuccess(Result result, os::Tick start, os::Tick end, const char *name, fs::DirectoryHandle handle, const char *fmt, ...) { + if (R_FAILED(result)) { + std::va_list vl; + va_start(vl, fmt); + fs::impl::IdString id_string; + OutputAccessLog(result, GetPriorityRawName(id_string), start, end, name, handle.handle, fmt, vl); + va_end(vl); + } + } + + void OutputAccessLogUnlessResultSuccess(Result result, os::Tick start, os::Tick end, const char *name, const void *handle, const char *fmt, ...) { + if (R_FAILED(result)) { + std::va_list vl; + va_start(vl, fmt); + fs::impl::IdString id_string; + OutputAccessLog(result, GetPriorityRawName(id_string), start, end, name, handle, fmt, vl); + va_end(vl); + } + } + + bool IsEnabledHandleAccessLog(fs::FileHandle handle) { + /* Get the file accessor. */ + impl::FileAccessor *accessor = reinterpret_cast(handle.handle); + if (accessor == nullptr) { + return true; + } + + /* Check the parent. */ + if (auto *parent = accessor->GetParent(); parent != nullptr) { + return parent->IsEnabledAccessLog(); + } else { + return false; + } + } + + bool IsEnabledHandleAccessLog(fs::DirectoryHandle handle) { + /* Get the file accessor. */ + impl::DirectoryAccessor *accessor = reinterpret_cast(handle.handle); + if (accessor == nullptr) { + return true; + } + + /* Check the parent. */ + if (auto *parent = accessor->GetParent(); parent != nullptr) { + return parent->IsEnabledAccessLog(); + } else { + return false; + } + } + + bool IsEnabledHandleAccessLog(fs::impl::IdentifyAccessLogHandle handle) { + return true; + } + + bool IsEnabledHandleAccessLog(const void *handle) { + if (handle == nullptr) { + return true; + } + + /* We should never receive non-null here. */ + AMS_ASSERT(handle == nullptr); + return false; + } + + bool IsEnabledFileSystemAccessorAccessLog(const char *mount_name) { + /* Get the accessor. */ + impl::FileSystemAccessor *accessor; + if (R_FAILED(impl::Find(std::addressof(accessor), mount_name))) { + return true; + } + + return accessor->IsEnabledAccessLog(); + } + + void EnableFileSystemAccessorAccessLog(const char *mount_name) { + /* Get the accessor. */ + impl::FileSystemAccessor *accessor; + AMS_FS_R_ABORT_UNLESS(impl::Find(std::addressof(accessor), mount_name)); + accessor->SetAccessLogEnabled(true); + } + +} diff --git a/libraries/libstratosphere/source/fs/fs_content_storage.cpp b/libraries/libstratosphere/source/fs/fs_content_storage.cpp index 5e778eb3c..3a0a7a717 100644 --- a/libraries/libstratosphere/source/fs/fs_content_storage.cpp +++ b/libraries/libstratosphere/source/fs/fs_content_storage.cpp @@ -60,22 +60,26 @@ namespace ams::fs { /* It can take some time for the system partition to be ready (if it's on the SD card). */ /* Thus, we will retry up to 10 times, waiting one second each time. */ - constexpr size_t MaxRetries = 10; - constexpr u64 RetryInterval = 1'000'000'000ul; + constexpr size_t MaxRetries = 10; + constexpr auto RetryInterval = TimeSpan::FromSeconds(1); /* Mount the content storage, use libnx bindings. 
*/ ::FsFileSystem fs; for (size_t i = 0; i < MaxRetries; i++) { + /* Try to open the filesystem. */ R_TRY_CATCH(fsOpenContentStorageFileSystem(std::addressof(fs), static_cast<::FsContentStorageId>(id))) { R_CATCH(fs::ResultSystemPartitionNotReady) { if (i < MaxRetries - 1) { - /* TODO: os::SleepThread */ - svcSleepThread(RetryInterval); + os::SleepThread(RetryInterval); + continue; } else { return fs::ResultSystemPartitionNotReady(); } } } R_END_TRY_CATCH; + + /* The filesystem was opened successfully. */ + break; } /* Allocate a new filesystem wrapper. */ diff --git a/libraries/libstratosphere/source/fs/fs_context.cpp b/libraries/libstratosphere/source/fs/fs_context.cpp new file mode 100644 index 000000000..7e00d708b --- /dev/null +++ b/libraries/libstratosphere/source/fs/fs_context.cpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::fs { + + namespace { + + constinit bool g_auto_abort_enabled = true; + + /* NOTE: This generates a global constructor. */ + os::SdkThreadLocalStorage g_context_tls; + + } + + void SetEnabledAutoAbort(bool enabled) { + g_auto_abort_enabled = enabled; + } + + AbortSpecifier DefaultResultHandler(Result result) { + if (g_auto_abort_enabled) { + return AbortSpecifier::Default; + } else { + return AbortSpecifier::Return; + } + } + + AbortSpecifier AlwaysReturnResultHandler(Result result) { + return AbortSpecifier::Return; + } + + constinit FsContext g_default_context(DefaultResultHandler); + constinit FsContext g_always_return_context(AlwaysReturnResultHandler); + + void SetDefaultFsContextResultHandler(const ResultHandler handler) { + if (handler == nullptr) { + g_default_context.SetHandler(DefaultResultHandler); + } else { + g_default_context.SetHandler(handler); + } + } + + const FsContext *GetCurrentThreadFsContext() { + const FsContext *context = reinterpret_cast(g_context_tls.GetValue()); + + if (context == nullptr) { + context = std::addressof(g_default_context); + } + + return context; + } + + void SetCurrentThreadFsContext(const FsContext *context) { + g_context_tls.SetValue(reinterpret_cast(context)); + } + + ScopedAutoAbortDisabler::ScopedAutoAbortDisabler() : prev_context(GetCurrentThreadFsContext()) { + SetCurrentThreadFsContext(std::addressof(g_always_return_context)); + } + +} diff --git a/libraries/libstratosphere/source/fs/fs_priority.cpp b/libraries/libstratosphere/source/fs/fs_priority.cpp new file mode 100644 index 000000000..8c23df02a --- /dev/null +++ b/libraries/libstratosphere/source/fs/fs_priority.cpp @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::fs { + + namespace { + + constexpr bool IsValidPriority(fs::Priority priority) { + return priority == Priority_Low || priority == Priority_Normal || priority == Priority_Realtime; + } + + constexpr bool IsValidPriorityRaw(fs::PriorityRaw priority_raw) { + return priority_raw == PriorityRaw_Background || priority_raw == PriorityRaw_Low || priority_raw == PriorityRaw_Normal || priority_raw == PriorityRaw_Realtime; + } + + fs::PriorityRaw ConvertPriorityToPriorityRaw(fs::Priority priority) { + AMS_ASSERT(IsValidPriority(priority)); + + switch (priority) { + case Priority_Low: return PriorityRaw_Low; + case Priority_Normal: return PriorityRaw_Normal; + case Priority_Realtime: return PriorityRaw_Realtime; + AMS_UNREACHABLE_DEFAULT_CASE(); + } + } + + fs::Priority ConvertPriorityRawToPriority(fs::PriorityRaw priority_raw) { + AMS_ASSERT(IsValidPriorityRaw(priority_raw)); + + switch (priority_raw) { + case PriorityRaw_Background: return Priority_Low; + case PriorityRaw_Low: return Priority_Low; + case PriorityRaw_Normal: return Priority_Normal; + case PriorityRaw_Realtime: return Priority_Realtime; + AMS_UNREACHABLE_DEFAULT_CASE(); + } + } + + void UpdateTlsIoPriority(os::ThreadType *thread, u8 tls_io) { + sf::SetFsInlineContext(thread, (tls_io & impl::TlsIoPriorityMask) | (sf::GetFsInlineContext(thread) & ~impl::TlsIoPriorityMask)); + } + + Result GetPriorityRawImpl(fs::PriorityRaw *out, os::ThreadType *thread) { + /* Validate arguments. */ + R_UNLESS(thread != nullptr, fs::ResultNullptrArgument()); + + /* Get the raw priority. */ + PriorityRaw priority_raw; + R_TRY(impl::ConvertTlsIoPriorityToFsPriority(std::addressof(priority_raw), impl::GetTlsIoPriority(thread))); + + /* Set output. */ + *out = priority_raw; + return ResultSuccess(); + } + + Result GetPriorityImpl(fs::Priority *out, os::ThreadType *thread) { + /* Validate arguments. */ + R_UNLESS(thread != nullptr, fs::ResultNullptrArgument()); + + /* Get the raw priority. */ + PriorityRaw priority_raw; + R_TRY(impl::ConvertTlsIoPriorityToFsPriority(std::addressof(priority_raw), impl::GetTlsIoPriority(thread))); + + /* Set output. */ + *out = ConvertPriorityRawToPriority(priority_raw); + return ResultSuccess(); + } + + Result SetPriorityRawImpl(os::ThreadType *thread, fs::PriorityRaw priority_raw) { + /* Validate arguments. */ + R_UNLESS(thread != nullptr, fs::ResultNullptrArgument()); + R_UNLESS(IsValidPriorityRaw(priority_raw), fs::ResultInvalidArgument()); + + /* Convert to tls io. */ + u8 tls_io; + R_TRY(impl::ConvertFsPriorityToTlsIoPriority(std::addressof(tls_io), priority_raw)); + + /* Update the priority. */ + UpdateTlsIoPriority(thread, tls_io); + return ResultSuccess(); + } + + Result SetPriorityImpl(os::ThreadType *thread, fs::Priority priority) { + /* Validate arguments. */ + R_UNLESS(thread != nullptr, fs::ResultNullptrArgument()); + R_UNLESS(IsValidPriority(priority), fs::ResultInvalidArgument()); + + /* Convert to tls io. */ + u8 tls_io; + R_TRY(impl::ConvertFsPriorityToTlsIoPriority(std::addressof(tls_io), ConvertPriorityToPriorityRaw(priority))); + + /* Update the priority. 
*/ + UpdateTlsIoPriority(thread, tls_io); + return ResultSuccess(); + } + + } + + Priority GetPriorityOnCurrentThread() { + fs::Priority priority; + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(GetPriorityImpl(std::addressof(priority), os::GetCurrentThread()), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE)); + return priority; + } + + Priority GetPriority(os::ThreadType *thread) { + fs::Priority priority; + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(GetPriorityImpl(std::addressof(priority), thread), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_THREAD_ID, thread != nullptr ? os::GetThreadId(thread) : static_cast(0))); + return priority; + } + + PriorityRaw GetPriorityRawOnCurrentThread() { + fs::PriorityRaw priority_raw; + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(GetPriorityRawImpl(std::addressof(priority_raw), os::GetCurrentThread()), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE)); + return priority_raw; + } + + PriorityRaw GetPriorityRawOnCurrentThreadInternal() { + fs::PriorityRaw priority_raw; + R_ABORT_UNLESS(GetPriorityRawImpl(std::addressof(priority_raw), os::GetCurrentThread())); + return priority_raw; + + } + + PriorityRaw GetPriorityRaw(os::ThreadType *thread) { + fs::PriorityRaw priority_raw; + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(GetPriorityRawImpl(std::addressof(priority_raw), thread), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_THREAD_ID, thread != nullptr ? os::GetThreadId(thread) : static_cast(0))); + return priority_raw; + } + + void SetPriorityOnCurrentThread(Priority priority) { + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(SetPriorityImpl(os::GetCurrentThread(), priority), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE)); + } + + void SetPriority(os::ThreadType *thread, Priority priority) { + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(SetPriorityImpl(os::GetCurrentThread(), priority), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_THREAD_ID, thread != nullptr ? os::GetThreadId(thread) : static_cast(0))); + } + + void SetPriorityRawOnCurrentThread(PriorityRaw priority_raw) { + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(SetPriorityRawImpl(os::GetCurrentThread(), priority_raw), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE)); + } + + void SetPriorityRaw(os::ThreadType *thread, PriorityRaw priority_raw) { + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG(SetPriorityRawImpl(os::GetCurrentThread(), priority_raw), nullptr, AMS_FS_IMPL_ACCESS_LOG_FORMAT_THREAD_ID, thread != nullptr ? os::GetThreadId(thread) : static_cast(0))); + } + +} diff --git a/libraries/libstratosphere/source/fs/fs_result_utils.cpp b/libraries/libstratosphere/source/fs/fs_result_utils.cpp new file mode 100644 index 000000000..c6e1b4f70 --- /dev/null +++ b/libraries/libstratosphere/source/fs/fs_result_utils.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::fs { + + namespace { + + constinit bool g_handled_by_application = false; + + } + + void SetResultHandledByApplication(bool application) { + g_handled_by_application = application; + } + + namespace impl { + + bool IsAbortNeeded(Result result) { + /* If the result succeeded, we never need to abort. */ + if (R_SUCCEEDED(result)) { + return false; + } + + /* Get the abort specifier from current context. */ + switch (GetCurrentThreadFsContext()->HandleResult(result)) { + case AbortSpecifier::Default: + if (g_handled_by_application) { + return !fs::ResultHandledByAllProcess::Includes(result); + } else { + return !(fs::ResultHandledByAllProcess::Includes(result) || fs::ResultHandledBySystemProcess::Includes(result)); + } + case AbortSpecifier::Abort: + return true; + case AbortSpecifier::Return: + return false; + AMS_UNREACHABLE_DEFAULT_CASE(); + } + } + + void LogResultErrorMessage(Result result) { + /* TODO: log specific results */ + } + + void LogErrorMessage(Result result, const char *function) { + /* If the result succeeded, there's nothing to log. */ + if (R_SUCCEEDED(result)) { + return; + } + + /* TODO: Actually log stuff. */ + } + + } + +} diff --git a/libraries/libstratosphere/source/fs/fs_sd_card.cpp b/libraries/libstratosphere/source/fs/fs_sd_card.cpp index 66c22c2c5..576116ef9 100644 --- a/libraries/libstratosphere/source/fs/fs_sd_card.cpp +++ b/libraries/libstratosphere/source/fs/fs_sd_card.cpp @@ -43,10 +43,9 @@ namespace ams::fs { } - Result MountSdCard(const char *name) { /* Validate the mount name. */ - R_TRY(impl::CheckMountName(name)); + R_TRY(impl::CheckMountNameAllowingReserved(name)); /* Open the SD card. This uses libnx bindings. */ FsFileSystem fs; diff --git a/libraries/libstratosphere/source/fs/fsa/fs_directory_accessor.hpp b/libraries/libstratosphere/source/fs/fsa/fs_directory_accessor.hpp index 77bc44af5..c47dc6699 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_directory_accessor.hpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_directory_accessor.hpp @@ -32,7 +32,7 @@ namespace ams::fs::impl { Result Read(s64 *out_count, DirectoryEntry *out_entries, s64 max_entries); Result GetEntryCount(s64 *out); - FileSystemAccessor &GetParent() const { return this->parent; } + FileSystemAccessor *GetParent() const { return std::addressof(this->parent); } }; } diff --git a/libraries/libstratosphere/source/fs/fsa/fs_file_accessor.cpp b/libraries/libstratosphere/source/fs/fsa/fs_file_accessor.cpp index d620d2a53..d7e772a0f 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_file_accessor.cpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_file_accessor.cpp @@ -29,8 +29,8 @@ namespace ams::fs::impl { FileAccessor::~FileAccessor() { /* Ensure that all files are flushed. */ - if (R_FAILED(this->write_result)) { - AMS_ABORT_UNLESS(this->write_state != WriteState::NeedsFlush); + if (R_SUCCEEDED(this->write_result)) { + AMS_FS_ABORT_UNLESS_WITH_RESULT(this->write_state != WriteState::NeedsFlush, fs::ResultNeedFlush()); } this->impl.reset(); @@ -40,6 +40,7 @@ namespace ams::fs::impl { } Result FileAccessor::ReadWithCacheAccessLog(size_t *out, s64 offset, void *buf, size_t size, const ReadOption &option, bool use_path_cache, bool use_data_cache) { + /* TODO */ AMS_ABORT(); } @@ -48,10 +49,12 @@ namespace ams::fs::impl { } Result FileAccessor::Read(size_t *out, s64 offset, void *buf, size_t size, const ReadOption &option) { - /* Fail after a write fails. */ - R_TRY(this->write_result); + /* Get a handle to this file for use in logging. 
*/ + FileHandle handle = { this }; + + /* Fail after a write fails. */ + R_UNLESS(R_SUCCEEDED(this->write_result), AMS_FS_IMPL_ACCESS_LOG_WITH_NAME(this->write_result, handle, "ReadFile", AMS_FS_IMPL_ACCESS_LOG_FORMAT_READ_FILE(out, offset, size))); - /* TODO: Logging. */ /* TODO: Support cache. */ const bool use_path_cache = this->parent != nullptr && this->file_path_hash != nullptr; const bool use_data_cache = /* TODO */false && this->parent != nullptr && this->parent->IsFileDataCacheAttachable(); @@ -60,7 +63,7 @@ namespace ams::fs::impl { /* TODO */ return this->ReadWithCacheAccessLog(out, offset, buf, size, option, use_path_cache, use_data_cache); } else { - return this->ReadWithoutCacheAccessLog(out, offset, buf, size, option); + return AMS_FS_IMPL_ACCESS_LOG_WITH_NAME(this->ReadWithoutCacheAccessLog(out, offset, buf, size, option), handle, "ReadFile", AMS_FS_IMPL_ACCESS_LOG_FORMAT_READ_FILE(out, offset, size)); } } diff --git a/libraries/libstratosphere/source/fs/fsa/fs_filesystem_accessor.hpp b/libraries/libstratosphere/source/fs/fsa/fs_filesystem_accessor.hpp index 5302af4a3..856059c16 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_filesystem_accessor.hpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_filesystem_accessor.hpp @@ -71,7 +71,7 @@ namespace ams::fs::impl { void SetPathBasedFileDataCacheAttachable(bool en) { this->path_cache_attachable = en; } void SetMultiCommitSupported(bool en) { this->multi_commit_supported = en; } - bool IsAccessLogEnabled() const { return this->access_log_enabled; } + bool IsEnabledAccessLog() const { return this->access_log_enabled; } bool IsFileDataCacheAttachable() const { return this->data_cache_attachable; } bool IsPathBasedFileDataCacheAttachable() const { return this->path_cache_attachable; } diff --git a/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.cpp b/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.cpp index 8f94eb750..79c226526 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.cpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.cpp @@ -120,49 +120,49 @@ namespace ams::fs::impl { return impl::Find(out_accessor, mount_name.str); } + Result Unmount(const char *name) { + impl::FileSystemAccessor *accessor; + R_TRY(impl::Find(std::addressof(accessor), name)); + + if (accessor->IsFileDataCacheAttachable()) { + /* TODO: Data cache purge */ + } + + impl::Unregister(name); + return ResultSuccess(); + } + } namespace ams::fs { namespace { - Result UnmountImpl(const char *name) { - impl::FileSystemAccessor *accessor; - R_TRY(impl::Find(std::addressof(accessor), name)); - - if (accessor->IsFileDataCacheAttachable()) { - /* TODO: Data cache purge */ - } - - impl::Unregister(name); - return ResultSuccess(); - } - } Result ConvertToFsCommonPath(char *dst, size_t dst_size, const char *src) { /* Ensure neither argument is nullptr. */ - R_UNLESS(dst != nullptr, fs::ResultNullptrArgument()); - R_UNLESS(src != nullptr, fs::ResultNullptrArgument()); + AMS_FS_R_UNLESS(dst != nullptr, fs::ResultNullptrArgument()); + AMS_FS_R_UNLESS(src != nullptr, fs::ResultNullptrArgument()); /* Get the mount name and sub path for the path. 
*/ MountName mount_name; const char *sub_path; - R_TRY(impl::GetMountNameAndSubPath(std::addressof(mount_name), std::addressof(sub_path), src)); + AMS_FS_R_TRY(impl::GetMountNameAndSubPath(std::addressof(mount_name), std::addressof(sub_path), src)); impl::FileSystemAccessor *accessor; - R_TRY(impl::Find(std::addressof(accessor), mount_name.str)); - R_TRY(accessor->GetCommonMountName(dst, dst_size)); + AMS_FS_R_TRY(impl::Find(std::addressof(accessor), mount_name.str)); + AMS_FS_R_TRY(accessor->GetCommonMountName(dst, dst_size)); const auto mount_name_len = strnlen(dst, dst_size); const auto common_path_len = std::snprintf(dst + mount_name_len, dst_size - mount_name_len, "%s", sub_path); - R_UNLESS(static_cast(common_path_len) < dst_size - mount_name_len, fs::ResultTooLongPath()); + AMS_FS_R_UNLESS(static_cast(common_path_len) < dst_size - mount_name_len, fs::ResultTooLongPath()); return ResultSuccess(); } void Unmount(const char *mount_name) { - R_ABORT_UNLESS(UnmountImpl(mount_name)); + AMS_FS_R_ABORT_UNLESS(AMS_FS_IMPL_ACCESS_LOG_UNMOUNT(impl::Unmount(mount_name), mount_name, AMS_FS_IMPL_ACCESS_LOG_FORMAT_MOUNT, mount_name)); } } diff --git a/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.hpp b/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.hpp index 1b174f5c0..997abdd62 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.hpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_mount_utils.hpp @@ -24,6 +24,7 @@ namespace ams::fs::impl { bool IsWindowsDrive(const char *name); bool IsReservedMountName(const char *name); + bool IsValidMountName(const char *name); Result CheckMountName(const char *name); Result CheckMountNameAllowingReserved(const char *name); diff --git a/libraries/libstratosphere/source/fs/fsa/fs_user_directory.cpp b/libraries/libstratosphere/source/fs/fsa/fs_user_directory.cpp index e29957c60..5375e4d6a 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_user_directory.cpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_user_directory.cpp @@ -28,15 +28,17 @@ namespace ams::fs { } Result ReadDirectory(s64 *out_count, DirectoryEntry *out_entries, DirectoryHandle handle, s64 max_entries) { - return Get(handle)->Read(out_count, out_entries, max_entries); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG(Get(handle)->Read(out_count, out_entries, max_entries), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_READ_DIRECTORY(out_count, max_entries))); + return ResultSuccess(); } Result GetDirectoryEntryCount(s64 *out, DirectoryHandle handle) { - return Get(handle)->GetEntryCount(out); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG(Get(handle)->GetEntryCount(out), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_DIRECTORY_ENTRY_COUNT(out))); + return ResultSuccess(); } void CloseDirectory(DirectoryHandle handle) { - delete Get(handle); + AMS_FS_IMPL_ACCESS_LOG((delete Get(handle), ResultSuccess()), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE); } } diff --git a/libraries/libstratosphere/source/fs/fsa/fs_user_file.cpp b/libraries/libstratosphere/source/fs/fsa/fs_user_file.cpp index 7e2a816ca..6b33f0475 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_user_file.cpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_user_file.cpp @@ -25,53 +25,70 @@ namespace ams::fs { return reinterpret_cast(handle.handle); } + Result ReadFileImpl(size_t *out, FileHandle handle, s64 offset, void *buffer, size_t size, const fs::ReadOption &option) { + R_TRY(Get(handle)->Read(out, offset, buffer, size, option)); + return ResultSuccess(); + } + } Result ReadFile(FileHandle handle, s64 offset, void *buffer, 
size_t size, const fs::ReadOption &option) { size_t read_size; - R_TRY(ReadFile(std::addressof(read_size), handle, offset, buffer, size, option)); - R_UNLESS(read_size == size, fs::ResultOutOfRange()); + AMS_FS_R_TRY(ReadFileImpl(std::addressof(read_size), handle, offset, buffer, size, option)); + AMS_FS_R_UNLESS(read_size == size, fs::ResultOutOfRange()); return ResultSuccess(); } Result ReadFile(FileHandle handle, s64 offset, void *buffer, size_t size) { - return ReadFile(handle, offset, buffer, size, ReadOption()); + size_t read_size; + AMS_FS_R_TRY(ReadFileImpl(std::addressof(read_size), handle, offset, buffer, size, ReadOption())); + AMS_FS_R_UNLESS(read_size == size, fs::ResultOutOfRange()); + return ResultSuccess(); } Result ReadFile(size_t *out, FileHandle handle, s64 offset, void *buffer, size_t size, const fs::ReadOption &option) { - return Get(handle)->Read(out, offset, buffer, size, option); + AMS_FS_R_TRY(ReadFileImpl(out, handle, offset, buffer, size, option)); + return ResultSuccess(); } Result ReadFile(size_t *out, FileHandle handle, s64 offset, void *buffer, size_t size) { - return ReadFile(out, handle, offset, buffer, size, ReadOption()); + AMS_FS_R_TRY(ReadFileImpl(out, handle, offset, buffer, size, ReadOption())); + return ResultSuccess(); } Result GetFileSize(s64 *out, FileHandle handle) { - return Get(handle)->GetSize(out); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG(Get(handle)->GetSize(out), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_FILE_SIZE(out))); + return ResultSuccess(); } Result FlushFile(FileHandle handle) { - return Get(handle)->Flush(); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG(Get(handle)->Flush(), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE)); + return ResultSuccess(); } Result WriteFile(FileHandle handle, s64 offset, const void *buffer, size_t size, const fs::WriteOption &option) { - return Get(handle)->Write(offset, buffer, size, option); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG(Get(handle)->Write(offset, buffer, size, option), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_WRITE_FILE(option), offset, size)); + return ResultSuccess(); } Result SetFileSize(FileHandle handle, s64 size) { - return Get(handle)->SetSize(size); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG(Get(handle)->SetSize(size), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_SIZE, size)); + return ResultSuccess(); } int GetFileOpenMode(FileHandle handle) { - return Get(handle)->GetOpenMode(); + const int mode = Get(handle)->GetOpenMode(); + AMS_FS_IMPL_ACCESS_LOG(ResultSuccess(), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_OPEN_MODE, static_cast(mode)); + return mode; } void CloseFile(FileHandle handle) { - delete Get(handle); + AMS_FS_IMPL_ACCESS_LOG((delete Get(handle), ResultSuccess()), handle, AMS_FS_IMPL_ACCESS_LOG_FORMAT_NONE); } Result QueryRange(QueryRangeInfo *out, FileHandle handle, s64 offset, s64 size) { - return Get(handle)->OperateRange(out, sizeof(*out), OperationId::QueryRange, offset, size, nullptr, 0); + AMS_FS_R_TRY(Get(handle)->OperateRange(out, sizeof(*out), OperationId::QueryRange, offset, size, nullptr, 0)); + return ResultSuccess(); } } diff --git a/libraries/libstratosphere/source/fs/fsa/fs_user_filesystem.cpp b/libraries/libstratosphere/source/fs/fsa/fs_user_filesystem.cpp index b4c77cda7..6308ef9de 100644 --- a/libraries/libstratosphere/source/fs/fsa/fs_user_filesystem.cpp +++ b/libraries/libstratosphere/source/fs/fsa/fs_user_filesystem.cpp @@ -29,41 +29,46 @@ namespace ams::fs { Result CreateFile(const char* path, s64 size, int option) { impl::FileSystemAccessor *accessor; const char *sub_path; - 
R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->CreateFile(sub_path, size, option); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(accessor->CreateFile(sub_path, size, option), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH_AND_SIZE, path, size)); + return ResultSuccess(); } Result DeleteFile(const char *path) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->DeleteFile(sub_path); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(accessor->DeleteFile(sub_path), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); + return ResultSuccess(); } Result CreateDirectory(const char *path) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->CreateDirectory(sub_path); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(accessor->CreateDirectory(sub_path), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); + return ResultSuccess(); } Result DeleteDirectory(const char *path) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->DeleteDirectory(sub_path); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(accessor->DeleteDirectory(sub_path), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); + return ResultSuccess(); } Result DeleteDirectoryRecursively(const char *path) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->DeleteDirectoryRecursively(sub_path); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(accessor->DeleteDirectoryRecursively(sub_path), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); + return ResultSuccess(); } Result RenameFile(const char *old_path, const char *new_path) { @@ -71,11 +76,17 @@ namespace ams::fs { impl::FileSystemAccessor *new_accessor; const char *old_sub_path; const char *new_sub_path; - R_TRY(impl::FindFileSystem(std::addressof(old_accessor), std::addressof(old_sub_path), old_path)); - R_TRY(impl::FindFileSystem(std::addressof(new_accessor), std::addressof(new_sub_path), new_path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(old_accessor), std::addressof(old_sub_path), old_path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_RENAME, old_path, new_path)); + 
AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(new_accessor), std::addressof(new_sub_path), new_path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_RENAME, old_path, new_path)); - R_UNLESS(old_accessor == new_accessor, fs::ResultRenameToOtherFileSystem()); - return old_accessor->RenameFile(old_sub_path, new_sub_path); + auto rename_impl = [=]() -> Result { + R_UNLESS(old_accessor == new_accessor, fs::ResultRenameToOtherFileSystem()); + R_TRY(old_accessor->RenameFile(old_sub_path, new_sub_path)); + return ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(rename_impl(), nullptr, old_accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_RENAME, old_path, new_path)); + return ResultSuccess(); } Result RenameDirectory(const char *old_path, const char *new_path) { @@ -83,30 +94,42 @@ namespace ams::fs { impl::FileSystemAccessor *new_accessor; const char *old_sub_path; const char *new_sub_path; - R_TRY(impl::FindFileSystem(std::addressof(old_accessor), std::addressof(old_sub_path), old_path)); - R_TRY(impl::FindFileSystem(std::addressof(new_accessor), std::addressof(new_sub_path), new_path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(old_accessor), std::addressof(old_sub_path), old_path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_RENAME, old_path, new_path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(new_accessor), std::addressof(new_sub_path), new_path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_RENAME, old_path, new_path)); - R_UNLESS(old_accessor == new_accessor, fs::ResultRenameToOtherFileSystem()); - return old_accessor->RenameDirectory(old_sub_path, new_sub_path); + auto rename_impl = [=]() -> Result { + R_UNLESS(old_accessor == new_accessor, fs::ResultRenameToOtherFileSystem()); + R_TRY(old_accessor->RenameDirectory(old_sub_path, new_sub_path)); + return ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(rename_impl(), nullptr, old_accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_RENAME, old_path, new_path)); + return ResultSuccess(); } Result GetEntryType(DirectoryEntryType *out, const char *path) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->GetEntryType(out, sub_path); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(accessor->GetEntryType(out, sub_path), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_ENTRY_TYPE(out, path))); + return ResultSuccess(); } Result OpenFile(FileHandle *out_file, const char *path, int mode) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); - - R_UNLESS(out_file != nullptr, fs::ResultNullptrArgument()); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH_AND_OPEN_MODE, path, static_cast(mode))); std::unique_ptr file_accessor; - R_TRY(accessor->OpenFile(std::addressof(file_accessor), sub_path, static_cast(mode))); + + auto open_impl = [&]() -> Result { + R_UNLESS(out_file != nullptr, fs::ResultNullptrArgument()); + R_TRY(accessor->OpenFile(std::addressof(file_accessor), sub_path, static_cast(mode))); + return 
ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(open_impl(), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH_AND_OPEN_MODE, path, static_cast(mode))); out_file->handle = file_accessor.release(); return ResultSuccess(); @@ -115,12 +138,17 @@ namespace ams::fs { Result OpenDirectory(DirectoryHandle *out_dir, const char *path, int mode) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); - - R_UNLESS(out_dir != nullptr, fs::ResultNullptrArgument()); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH_AND_OPEN_MODE, path, static_cast(mode))); std::unique_ptr dir_accessor; - R_TRY(accessor->OpenDirectory(std::addressof(dir_accessor), sub_path, static_cast(mode))); + + auto open_impl = [&]() -> Result { + R_UNLESS(out_dir != nullptr, fs::ResultNullptrArgument()); + R_TRY(accessor->OpenDirectory(std::addressof(dir_accessor), sub_path, static_cast(mode))); + return ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(open_impl(), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH_AND_OPEN_MODE, path, static_cast(mode))); out_dir->handle = dir_accessor.release(); return ResultSuccess(); @@ -129,48 +157,86 @@ namespace ams::fs { Result CleanDirectoryRecursively(const char *path) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->CleanDirectoryRecursively(sub_path); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(accessor->CleanDirectoryRecursively(sub_path), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); + return ResultSuccess(); } Result GetFreeSpaceSize(s64 *out, const char *path) { impl::FileSystemAccessor *accessor; - const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + const char *sub_path = nullptr; - return accessor->GetFreeSpaceSize(out, sub_path); + /* Get the accessor. */ + auto find_impl = [&]() -> Result { + R_UNLESS(out != nullptr, fs::ResultNullptrArgument()); + R_UNLESS(path != nullptr, fs::ResultNullptrArgument()); + if (impl::IsValidMountName(path)) { + R_TRY(impl::Find(std::addressof(accessor), path)); + } else { + R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + } + return ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(find_impl(), AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_SPACE_SIZE(out, path))); + + /* Get the space size. */ + auto get_size_impl = [&]() -> Result { + R_UNLESS(sub_path == nullptr || std::strcmp(sub_path, "/") == 0, fs::ResultInvalidMountName()); + R_TRY(accessor->GetFreeSpaceSize(out, "/")); + return ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(get_size_impl(), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_SPACE_SIZE(out, path))); + return ResultSuccess(); } Result GetTotalSpaceSize(s64 *out, const char *path) { + /* NOTE: Nintendo does not do access logging here, and does not support mount-name instead of path. 
*/ impl::FileSystemAccessor *accessor; - const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + const char *sub_path = nullptr; - return accessor->GetTotalSpaceSize(out, sub_path); + /* Get the accessor. */ + auto find_impl = [&]() -> Result { + R_UNLESS(out != nullptr, fs::ResultNullptrArgument()); + R_UNLESS(path != nullptr, fs::ResultNullptrArgument()); + if (impl::IsValidMountName(path)) { + R_TRY(impl::Find(std::addressof(accessor), path)); + } else { + R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + } + return ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(find_impl(), AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_SPACE_SIZE(out, path))); + + /* Get the space size. */ + auto get_size_impl = [&]() -> Result { + R_UNLESS(sub_path == nullptr || std::strcmp(sub_path, "/") == 0, fs::ResultInvalidMountName()); + R_TRY(accessor->GetTotalSpaceSize(out, "/")); + return ResultSuccess(); + }; + + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM(get_size_impl(), nullptr, accessor, AMS_FS_IMPL_ACCESS_LOG_FORMAT_GET_SPACE_SIZE(out, path))); + return ResultSuccess(); } Result SetConcatenationFileAttribute(const char *path) { impl::FileSystemAccessor *accessor; const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + AMS_FS_R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); - return accessor->QueryEntry(nullptr, 0, nullptr, 0, fsa::QueryId::SetConcatenationFileAttribute, sub_path); - } + AMS_FS_R_TRY(accessor->QueryEntry(nullptr, 0, nullptr, 0, fsa::QueryId::SetConcatenationFileAttribute, sub_path)); - Result GetFileTimeStampRaw(FileTimeStampRaw *out, const char *path) { - impl::FileSystemAccessor *accessor; - const char *sub_path; - R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); - - return accessor->GetFileTimeStampRaw(out, sub_path); + return ResultSuccess(); } Result OpenFile(FileHandle *out, std::unique_ptr &&file, int mode) { - R_UNLESS(out != nullptr, fs::ResultNullptrArgument()); + AMS_FS_R_UNLESS(out != nullptr, fs::ResultNullptrArgument()); auto file_accessor = std::make_unique(std::move(file), nullptr, static_cast(mode)); - R_UNLESS(file_accessor != nullptr, fs::ResultAllocationFailureInNew()); + AMS_FS_R_UNLESS(file_accessor != nullptr, fs::ResultAllocationFailureInNew()); out->handle = file_accessor.release(); return ResultSuccess(); @@ -178,22 +244,22 @@ namespace ams::fs { namespace { - Result CommitImpl(const char *path) { + Result CommitImpl(const char *path, const char *func_name) { impl::FileSystemAccessor *accessor; - R_TRY(impl::Find(std::addressof(accessor), path)); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_UNLESS_R_SUCCEEDED(impl::Find(std::addressof(accessor), path), AMS_FS_IMPL_ACCESS_LOG_FORMAT_PATH, path)); - return accessor->Commit(); + AMS_FS_R_TRY(AMS_FS_IMPL_ACCESS_LOG_FILESYSTEM_WITH_NAME(accessor->Commit(), nullptr, accessor, func_name, AMS_FS_IMPL_ACCESS_LOG_FORMAT_MOUNT, path)); + return ResultSuccess(); } } Result Commit(const char *path) { - return CommitImpl(path); + return CommitImpl(path, AMS_CURRENT_FUNCTION_NAME); } Result CommitSaveData(const char *path) { - return CommitImpl(path); + return CommitImpl(path, AMS_CURRENT_FUNCTION_NAME); } - } diff --git a/libraries/libstratosphere/source/fs/fsa/fs_user_filesystem_for_debug.cpp b/libraries/libstratosphere/source/fs/fsa/fs_user_filesystem_for_debug.cpp new file mode 
100644 index 000000000..d10e5be6b --- /dev/null +++ b/libraries/libstratosphere/source/fs/fsa/fs_user_filesystem_for_debug.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <stratosphere.hpp> +#include "fs_filesystem_accessor.hpp" +#include "fs_file_accessor.hpp" +#include "fs_directory_accessor.hpp" +#include "fs_mount_utils.hpp" +#include "fs_user_mount_table.hpp" + +namespace ams::fs { + + namespace impl { + + Result GetFileTimeStampRawForDebug(FileTimeStampRaw *out, const char *path) { + impl::FileSystemAccessor *accessor; + const char *sub_path; + R_TRY(impl::FindFileSystem(std::addressof(accessor), std::addressof(sub_path), path)); + + R_TRY(accessor->GetFileTimeStampRaw(out, sub_path)); + + return ResultSuccess(); + } + + } + + Result GetFileTimeStampRawForDebug(FileTimeStampRaw *out, const char *path) { + /* Forward to the impl:: helper (an unqualified call here would recurse into this function). */ + AMS_FS_R_TRY(impl::GetFileTimeStampRawForDebug(out, path)); + return ResultSuccess(); + } + +} diff --git a/libraries/libstratosphere/source/fssrv/fssrv_filesystem_interface_adapter.cpp b/libraries/libstratosphere/source/fssrv/fssrv_filesystem_interface_adapter.cpp index 75b2dbd15..23040e25c 100644 --- a/libraries/libstratosphere/source/fssrv/fssrv_filesystem_interface_adapter.cpp +++ b/libraries/libstratosphere/source/fssrv/fssrv_filesystem_interface_adapter.cpp @@ -232,7 +232,7 @@ namespace ams::fssrv::impl { return this->base_fs->GetEntryType(reinterpret_cast(out.GetPointer()), normalizer.GetPath()); } - Result FileSystemInterfaceAdapter::OpenFile(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode) { + Result FileSystemInterfaceAdapter::OpenFile(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode) { auto read_lock = this->AcquireCacheInvalidationReadLock(); std::unique_lock open_count_semaphore; @@ -254,14 +254,14 @@ namespace ams::fssrv::impl { /* TODO: N creates an nn::fssystem::AsynchronousAccessFile here.
*/ std::shared_ptr shared_this = this->shared_from_this(); - std::shared_ptr file_intf = std::make_shared(std::move(file), std::move(shared_this), std::move(open_count_semaphore)); + auto file_intf = ams::sf::MakeShared(std::move(file), std::move(shared_this), std::move(open_count_semaphore)); R_UNLESS(file_intf != nullptr, fs::ResultAllocationFailureInFileSystemInterfaceAdapter()); out.SetValue(std::move(file_intf), target_object_id); return ResultSuccess(); } - Result FileSystemInterfaceAdapter::OpenDirectory(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode) { + Result FileSystemInterfaceAdapter::OpenDirectory(ams::sf::Out> out, const fssrv::sf::Path &path, u32 mode) { auto read_lock = this->AcquireCacheInvalidationReadLock(); std::unique_lock open_count_semaphore; @@ -281,7 +281,7 @@ namespace ams::fssrv::impl { const auto target_object_id = dir->GetDomainObjectId(); std::shared_ptr shared_this = this->shared_from_this(); - std::shared_ptr dir_intf = std::make_shared(std::move(dir), std::move(shared_this), std::move(open_count_semaphore)); + auto dir_intf = ams::sf::MakeShared(std::move(dir), std::move(shared_this), std::move(open_count_semaphore)); R_UNLESS(dir_intf != nullptr, fs::ResultAllocationFailureInFileSystemInterfaceAdapter()); out.SetValue(std::move(dir_intf), target_object_id); diff --git a/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp b/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp index 225a9656d..920e8a2e8 100644 --- a/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp +++ b/libraries/libstratosphere/source/fssystem/fssystem_alignment_matching_storage_impl.cpp @@ -73,6 +73,8 @@ namespace ams::fssystem { core_size = (size < buffer_round_up_difference) ? 0 : util::AlignDown(size - buffer_round_up_difference, data_alignment); buffer_gap = buffer_round_up_difference; offset_gap = GetRoundDownDifference(offset, data_alignment); + + covered_offset = offset; } /* Read the core portion. */ diff --git a/libraries/libstratosphere/source/fssystem/fssystem_file_system_proxy_api.cpp b/libraries/libstratosphere/source/fssystem/fssystem_file_system_proxy_api.cpp index 31011c0c1..8bc1dd43d 100644 --- a/libraries/libstratosphere/source/fssystem/fssystem_file_system_proxy_api.cpp +++ b/libraries/libstratosphere/source/fssystem/fssystem_file_system_proxy_api.cpp @@ -24,12 +24,12 @@ namespace ams::fssystem { /* Official FS has a 4.5 MB exp heap, a 6 MB buffer pool, an 8 MB device buffer manager heap, and a 14 MB buffer manager heap. */ /* We don't need so much memory for ams.mitm (as we're servicing a much more limited context). */ - /* We'll give ourselves a 2.5 MB exp heap, a 2 MB buffer pool, a 2 MB device buffer manager heap, and a 2 MB buffer manager heap. */ - /* These numbers match signed-system-partition safe FS. */ - constexpr size_t ExpHeapSize = 2_MB + 512_KB; - constexpr size_t BufferPoolSize = 2_MB; - constexpr size_t DeviceBufferSize = 2_MB; - constexpr size_t BufferManagerHeapSize = 2_MB; + /* We'll give ourselves a 1.5 MB exp heap, a 1 MB buffer pool, a 1 MB device buffer manager heap, and a 1 MB buffer manager heap. */ + /* These numbers are 1 MB less than signed-system-partition safe FS in all pools. 
*/ + constexpr size_t ExpHeapSize = 1_MB + 512_KB; + constexpr size_t BufferPoolSize = 1_MB; + constexpr size_t DeviceBufferSize = 1_MB; + constexpr size_t BufferManagerHeapSize = 1_MB; constexpr size_t MaxCacheCount = 1024; constexpr size_t BlockSize = 16_KB; diff --git a/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp b/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp index 171ac9317..c1afd1860 100644 --- a/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp +++ b/libraries/libstratosphere/source/fssystem/fssystem_nca_file_system_driver.cpp @@ -888,7 +888,7 @@ namespace ams::fssystem { /* Set the storage holder's storages. */ storage->SetStorage(0, original_storage.get(), 0, original_data_size); - storage->SetStorage(1, indirect_table_storage.get(), 0, indirect_data_size); + storage->SetStorage(1, indirect_data_storage.get(), 0, indirect_data_size); storage->Set(std::move(base_storage), std::move(original_storage), std::move(indirect_table_storage), std::move(indirect_data_storage)); /* Set the indirect storage. */ diff --git a/libraries/libstratosphere/source/fssystem/fssystem_pooled_buffer.cpp b/libraries/libstratosphere/source/fssystem/fssystem_pooled_buffer.cpp index df32ec50a..7d36e9919 100644 --- a/libraries/libstratosphere/source/fssystem/fssystem_pooled_buffer.cpp +++ b/libraries/libstratosphere/source/fssystem/fssystem_pooled_buffer.cpp @@ -145,8 +145,7 @@ namespace ams::fssystem { break; } else { /* Sleep. */ - /* TODO: os::SleepThread() */ - svc::SleepThread(RetryWait.GetNanoSeconds()); + os::SleepThread(RetryWait); g_retry_count++; } } diff --git a/libraries/libstratosphere/source/lr/lr_add_on_content_location_resolver_impl.hpp b/libraries/libstratosphere/source/lr/lr_add_on_content_location_resolver_impl.hpp index 17c7a0c1c..60e57b767 100644 --- a/libraries/libstratosphere/source/lr/lr_add_on_content_location_resolver_impl.hpp +++ b/libraries/libstratosphere/source/lr/lr_add_on_content_location_resolver_impl.hpp @@ -21,7 +21,7 @@ namespace ams::lr { - class AddOnContentLocationResolverImpl : public IAddOnContentLocationResolver { + class AddOnContentLocationResolverImpl { private: /* Storage for RegisteredData entries by data id. */ RegisteredStorages registered_storages; @@ -29,12 +29,13 @@ namespace ams::lr { AddOnContentLocationResolverImpl() : registered_storages(hos::GetVersion() < hos::Version_9_0_0 ? 0x800 : 0x2) { /* ... */ } /* Actual commands. 
*/ - virtual Result ResolveAddOnContentPath(sf::Out out, ncm::DataId id) override; - virtual Result RegisterAddOnContentStorageDeprecated(ncm::DataId id, ncm::StorageId storage_id) override; - virtual Result RegisterAddOnContentStorage(ncm::DataId id, ncm::ApplicationId application_id, ncm::StorageId storage_id) override; - virtual Result UnregisterAllAddOnContentPath() override; - virtual Result RefreshApplicationAddOnContent(const sf::InArray &ids) override; - virtual Result UnregisterApplicationAddOnContent(ncm::ApplicationId id) override; + Result ResolveAddOnContentPath(sf::Out out, ncm::DataId id); + Result RegisterAddOnContentStorageDeprecated(ncm::DataId id, ncm::StorageId storage_id); + Result RegisterAddOnContentStorage(ncm::DataId id, ncm::ApplicationId application_id, ncm::StorageId storage_id); + Result UnregisterAllAddOnContentPath(); + Result RefreshApplicationAddOnContent(const sf::InArray &ids); + Result UnregisterApplicationAddOnContent(ncm::ApplicationId id); }; + static_assert(lr::IsIAddOnContentLocationResolver); } diff --git a/libraries/libstratosphere/source/lr/lr_api.cpp b/libraries/libstratosphere/source/lr/lr_api.cpp index 36a7cb813..391285080 100644 --- a/libraries/libstratosphere/source/lr/lr_api.cpp +++ b/libraries/libstratosphere/source/lr/lr_api.cpp @@ -42,7 +42,7 @@ namespace ams::lr { LrLocationResolver lr; R_TRY(lrOpenLocationResolver(static_cast(storage_id), std::addressof(lr))); - *out = LocationResolver(std::make_shared(lr)); + *out = LocationResolver(sf::MakeShared(lr)); return ResultSuccess(); } @@ -50,7 +50,7 @@ namespace ams::lr { LrRegisteredLocationResolver lr; R_TRY(lrOpenRegisteredLocationResolver(std::addressof(lr))); - *out = RegisteredLocationResolver(std::make_shared(lr)); + *out = RegisteredLocationResolver(sf::MakeShared(lr)); return ResultSuccess(); } diff --git a/libraries/libstratosphere/source/lr/lr_content_location_resolver_impl.hpp b/libraries/libstratosphere/source/lr/lr_content_location_resolver_impl.hpp index 5553962da..5b344bdfc 100644 --- a/libraries/libstratosphere/source/lr/lr_content_location_resolver_impl.hpp +++ b/libraries/libstratosphere/source/lr/lr_content_location_resolver_impl.hpp @@ -35,32 +35,33 @@ namespace ams::lr { void GetContentStoragePath(Path *out, ncm::ContentId content_id); public: /* Actual commands. 
*/ - virtual Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) override; - virtual Result RedirectProgramPath(const Path &path, ncm::ProgramId id) override; - virtual Result ResolveApplicationControlPath(sf::Out out, ncm::ProgramId id) override; - virtual Result ResolveApplicationHtmlDocumentPath(sf::Out out, ncm::ProgramId id) override; - virtual Result ResolveDataPath(sf::Out out, ncm::DataId id) override; - virtual Result RedirectApplicationControlPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationControlPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result RedirectApplicationHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result ResolveApplicationLegalInformationPath(sf::Out out, ncm::ProgramId id) override; - virtual Result RedirectApplicationLegalInformationPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationLegalInformationPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result Refresh() override; - virtual Result RedirectApplicationProgramPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result ClearApplicationRedirectionDeprecated() override; - virtual Result ClearApplicationRedirection(const sf::InArray &excluding_ids) override; - virtual Result EraseProgramRedirection(ncm::ProgramId id) override; - virtual Result EraseApplicationControlRedirection(ncm::ProgramId id) override; - virtual Result EraseApplicationHtmlDocumentRedirection(ncm::ProgramId id) override; - virtual Result EraseApplicationLegalInformationRedirection(ncm::ProgramId id) override; - virtual Result ResolveProgramPathForDebug(sf::Out out, ncm::ProgramId id) override; - virtual Result RedirectProgramPathForDebug(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationProgramPathForDebugDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationProgramPathForDebug(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result EraseProgramRedirectionForDebug(ncm::ProgramId id) override; + Result ResolveProgramPath(sf::Out out, ncm::ProgramId id); + Result RedirectProgramPath(const Path &path, ncm::ProgramId id); + Result ResolveApplicationControlPath(sf::Out out, ncm::ProgramId id); + Result ResolveApplicationHtmlDocumentPath(sf::Out out, ncm::ProgramId id); + Result ResolveDataPath(sf::Out out, ncm::DataId id); + Result RedirectApplicationControlPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationControlPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result RedirectApplicationHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result ResolveApplicationLegalInformationPath(sf::Out out, ncm::ProgramId id); + Result RedirectApplicationLegalInformationPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationLegalInformationPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result Refresh(); + Result RedirectApplicationProgramPathDeprecated(const 
Path &path, ncm::ProgramId id); + Result RedirectApplicationProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result ClearApplicationRedirectionDeprecated(); + Result ClearApplicationRedirection(const sf::InArray &excluding_ids); + Result EraseProgramRedirection(ncm::ProgramId id); + Result EraseApplicationControlRedirection(ncm::ProgramId id); + Result EraseApplicationHtmlDocumentRedirection(ncm::ProgramId id); + Result EraseApplicationLegalInformationRedirection(ncm::ProgramId id); + Result ResolveProgramPathForDebug(sf::Out out, ncm::ProgramId id); + Result RedirectProgramPathForDebug(const Path &path, ncm::ProgramId id); + Result RedirectApplicationProgramPathForDebugDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationProgramPathForDebug(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result EraseProgramRedirectionForDebug(ncm::ProgramId id); }; + static_assert(lr::IsILocationResolver); } diff --git a/libraries/libstratosphere/source/lr/lr_location_resolver_impl_base.hpp b/libraries/libstratosphere/source/lr/lr_location_resolver_impl_base.hpp index d8931d041..03f7c68ee 100644 --- a/libraries/libstratosphere/source/lr/lr_location_resolver_impl_base.hpp +++ b/libraries/libstratosphere/source/lr/lr_location_resolver_impl_base.hpp @@ -20,7 +20,7 @@ namespace ams::lr { - class LocationResolverImplBase : public ILocationResolver { + class LocationResolverImplBase { NON_COPYABLE(LocationResolverImplBase); NON_MOVEABLE(LocationResolverImplBase); protected: diff --git a/libraries/libstratosphere/source/lr/lr_location_resolver_manager_impl.cpp b/libraries/libstratosphere/source/lr/lr_location_resolver_manager_impl.cpp index f726cdf7e..fd5392c52 100644 --- a/libraries/libstratosphere/source/lr/lr_location_resolver_manager_impl.cpp +++ b/libraries/libstratosphere/source/lr/lr_location_resolver_manager_impl.cpp @@ -30,10 +30,10 @@ namespace ams::lr { /* No existing resolver is present, create one. */ if (!resolver) { if (storage_id == ncm::StorageId::Host) { - AMS_ABORT_UNLESS(this->location_resolvers.Insert(storage_id, std::make_shared())); + AMS_ABORT_UNLESS(this->location_resolvers.Insert(storage_id, sf::MakeShared())); } else { - auto content_resolver = std::make_shared(storage_id); - R_TRY(content_resolver->Refresh()); + auto content_resolver = sf::MakeShared(storage_id); + R_TRY(content_resolver->GetImpl().Refresh()); AMS_ABORT_UNLESS(this->location_resolvers.Insert(storage_id, std::move(content_resolver))); } @@ -51,7 +51,7 @@ namespace ams::lr { /* No existing resolver is present, create one. */ if (!this->registered_location_resolver) { - this->registered_location_resolver = std::make_shared(); + this->registered_location_resolver = sf::MakeShared(); } /* Copy the output interface. */ @@ -79,7 +79,7 @@ namespace ams::lr { /* No existing resolver is present, create one. */ if (!this->add_on_content_location_resolver) { - this->add_on_content_location_resolver = std::make_shared(); + this->add_on_content_location_resolver = sf::MakeShared(); } /* Copy the output interface. 
*/ diff --git a/libraries/libstratosphere/source/lr/lr_redirect_only_location_resolver_impl.hpp b/libraries/libstratosphere/source/lr/lr_redirect_only_location_resolver_impl.hpp index a204169d1..a9aee02b4 100644 --- a/libraries/libstratosphere/source/lr/lr_redirect_only_location_resolver_impl.hpp +++ b/libraries/libstratosphere/source/lr/lr_redirect_only_location_resolver_impl.hpp @@ -24,32 +24,33 @@ namespace ams::lr { ~RedirectOnlyLocationResolverImpl(); public: /* Actual commands. */ - virtual Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) override; - virtual Result RedirectProgramPath(const Path &path, ncm::ProgramId id) override; - virtual Result ResolveApplicationControlPath(sf::Out out, ncm::ProgramId id) override; - virtual Result ResolveApplicationHtmlDocumentPath(sf::Out out, ncm::ProgramId id) override; - virtual Result ResolveDataPath(sf::Out out, ncm::DataId id) override; - virtual Result RedirectApplicationControlPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationControlPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result RedirectApplicationHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result ResolveApplicationLegalInformationPath(sf::Out out, ncm::ProgramId id) override; - virtual Result RedirectApplicationLegalInformationPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationLegalInformationPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result Refresh() override; - virtual Result RedirectApplicationProgramPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result ClearApplicationRedirectionDeprecated() override; - virtual Result ClearApplicationRedirection(const sf::InArray &excluding_ids) override; - virtual Result EraseProgramRedirection(ncm::ProgramId id) override; - virtual Result EraseApplicationControlRedirection(ncm::ProgramId id) override; - virtual Result EraseApplicationHtmlDocumentRedirection(ncm::ProgramId id) override; - virtual Result EraseApplicationLegalInformationRedirection(ncm::ProgramId id) override; - virtual Result ResolveProgramPathForDebug(sf::Out out, ncm::ProgramId id) override; - virtual Result RedirectProgramPathForDebug(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationProgramPathForDebugDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectApplicationProgramPathForDebug(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result EraseProgramRedirectionForDebug(ncm::ProgramId id) override; + Result ResolveProgramPath(sf::Out out, ncm::ProgramId id); + Result RedirectProgramPath(const Path &path, ncm::ProgramId id); + Result ResolveApplicationControlPath(sf::Out out, ncm::ProgramId id); + Result ResolveApplicationHtmlDocumentPath(sf::Out out, ncm::ProgramId id); + Result ResolveDataPath(sf::Out out, ncm::DataId id); + Result RedirectApplicationControlPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationControlPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result RedirectApplicationHtmlDocumentPathDeprecated(const Path &path, 
ncm::ProgramId id); + Result RedirectApplicationHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result ResolveApplicationLegalInformationPath(sf::Out out, ncm::ProgramId id); + Result RedirectApplicationLegalInformationPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationLegalInformationPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result Refresh(); + Result RedirectApplicationProgramPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result ClearApplicationRedirectionDeprecated(); + Result ClearApplicationRedirection(const sf::InArray &excluding_ids); + Result EraseProgramRedirection(ncm::ProgramId id); + Result EraseApplicationControlRedirection(ncm::ProgramId id); + Result EraseApplicationHtmlDocumentRedirection(ncm::ProgramId id); + Result EraseApplicationLegalInformationRedirection(ncm::ProgramId id); + Result ResolveProgramPathForDebug(sf::Out out, ncm::ProgramId id); + Result RedirectProgramPathForDebug(const Path &path, ncm::ProgramId id); + Result RedirectApplicationProgramPathForDebugDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectApplicationProgramPathForDebug(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result EraseProgramRedirectionForDebug(ncm::ProgramId id); }; + static_assert(lr::IsILocationResolver); } diff --git a/libraries/libstratosphere/source/lr/lr_registered_location_resolver_impl.hpp b/libraries/libstratosphere/source/lr/lr_registered_location_resolver_impl.hpp index 1a5ea6994..b1399336f 100644 --- a/libraries/libstratosphere/source/lr/lr_registered_location_resolver_impl.hpp +++ b/libraries/libstratosphere/source/lr/lr_registered_location_resolver_impl.hpp @@ -21,7 +21,7 @@ namespace ams::lr { - class RegisteredLocationResolverImpl : public IRegisteredLocationResolver { + class RegisteredLocationResolverImpl { private: static constexpr size_t MaxRegisteredLocationsDeprecated = 0x10; static constexpr size_t MaxRegisteredLocations = 0x20; @@ -49,20 +49,21 @@ namespace ams::lr { ~RegisteredLocationResolverImpl(); public: /* Actual commands. 
*/ - virtual Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) override; - virtual Result RegisterProgramPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RegisterProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result UnregisterProgramPath(ncm::ProgramId id) override; - virtual Result RedirectProgramPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result ResolveHtmlDocumentPath(sf::Out out, ncm::ProgramId id) override; - virtual Result RegisterHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RegisterHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result UnregisterHtmlDocumentPath(ncm::ProgramId id) override; - virtual Result RedirectHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) override; - virtual Result RedirectHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override; - virtual Result Refresh() override; - virtual Result RefreshExcluding(const sf::InArray &ids) override; + Result ResolveProgramPath(sf::Out out, ncm::ProgramId id); + Result RegisterProgramPathDeprecated(const Path &path, ncm::ProgramId id); + Result RegisterProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result UnregisterProgramPath(ncm::ProgramId id); + Result RedirectProgramPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result ResolveHtmlDocumentPath(sf::Out out, ncm::ProgramId id); + Result RegisterHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id); + Result RegisterHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result UnregisterHtmlDocumentPath(ncm::ProgramId id); + Result RedirectHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id); + Result RedirectHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id); + Result Refresh(); + Result RefreshExcluding(const sf::InArray &ids); }; + static_assert(lr::IsIRegisteredLocationResolver); } diff --git a/libraries/libstratosphere/source/lr/lr_remote_location_resolver_impl.hpp b/libraries/libstratosphere/source/lr/lr_remote_location_resolver_impl.hpp index 59093e7d8..895325dcf 100644 --- a/libraries/libstratosphere/source/lr/lr_remote_location_resolver_impl.hpp +++ b/libraries/libstratosphere/source/lr/lr_remote_location_resolver_impl.hpp @@ -18,7 +18,7 @@ namespace ams::lr { - class RemoteLocationResolverImpl : public ILocationResolver { + class RemoteLocationResolverImpl { private: ::LrLocationResolver srv; public: @@ -27,121 +27,122 @@ namespace ams::lr { ~RemoteLocationResolverImpl() { ::serviceClose(&srv.s); } public: /* Actual commands. 
*/ - virtual Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) override { - return lrLrResolveProgramPath(std::addressof(this->srv), id.value, out->str); + Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) { + return ::lrLrResolveProgramPath(std::addressof(this->srv), id.value, out->str); } - virtual Result RedirectProgramPath(const Path &path, ncm::ProgramId id) override { - return lrLrRedirectProgramPath(std::addressof(this->srv), id.value, path.str); + Result RedirectProgramPath(const Path &path, ncm::ProgramId id) { + return ::lrLrRedirectProgramPath(std::addressof(this->srv), id.value, path.str); } - virtual Result ResolveApplicationControlPath(sf::Out out, ncm::ProgramId id) override { - return lrLrResolveApplicationControlPath(std::addressof(this->srv), id.value, out->str); + Result ResolveApplicationControlPath(sf::Out out, ncm::ProgramId id) { + return ::lrLrResolveApplicationControlPath(std::addressof(this->srv), id.value, out->str); } - virtual Result ResolveApplicationHtmlDocumentPath(sf::Out out, ncm::ProgramId id) override { - return lrLrResolveApplicationHtmlDocumentPath(std::addressof(this->srv), id.value, out->str); + Result ResolveApplicationHtmlDocumentPath(sf::Out out, ncm::ProgramId id) { + return ::lrLrResolveApplicationHtmlDocumentPath(std::addressof(this->srv), id.value, out->str); } - virtual Result ResolveDataPath(sf::Out out, ncm::DataId id) override { - return lrLrResolveDataPath(std::addressof(this->srv), id.value, out->str); + Result ResolveDataPath(sf::Out out, ncm::DataId id) { + return ::lrLrResolveDataPath(std::addressof(this->srv), id.value, out->str); } - virtual Result RedirectApplicationControlPathDeprecated(const Path &path, ncm::ProgramId id) override { - return lrLrRedirectApplicationControlPath(std::addressof(this->srv), id.value, 0, path.str); + Result RedirectApplicationControlPathDeprecated(const Path &path, ncm::ProgramId id) { + return ::lrLrRedirectApplicationControlPath(std::addressof(this->srv), id.value, 0, path.str); } - virtual Result RedirectApplicationControlPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { - return lrLrRedirectApplicationControlPath(std::addressof(this->srv), id.value, owner_id.value, path.str); + Result RedirectApplicationControlPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { + return ::lrLrRedirectApplicationControlPath(std::addressof(this->srv), id.value, owner_id.value, path.str); } - virtual Result RedirectApplicationHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) override { - return lrLrRedirectApplicationHtmlDocumentPath(std::addressof(this->srv), id.value, 0, path.str); + Result RedirectApplicationHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) { + return ::lrLrRedirectApplicationHtmlDocumentPath(std::addressof(this->srv), id.value, 0, path.str); } - virtual Result RedirectApplicationHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { - return lrLrRedirectApplicationHtmlDocumentPath(std::addressof(this->srv), id.value, owner_id.value, path.str); + Result RedirectApplicationHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { + return ::lrLrRedirectApplicationHtmlDocumentPath(std::addressof(this->srv), id.value, owner_id.value, path.str); } - virtual Result ResolveApplicationLegalInformationPath(sf::Out out, ncm::ProgramId id) override { - return lrLrResolveApplicationLegalInformationPath(std::addressof(this->srv), id.value, out->str); + Result 
ResolveApplicationLegalInformationPath(sf::Out out, ncm::ProgramId id) { + return ::lrLrResolveApplicationLegalInformationPath(std::addressof(this->srv), id.value, out->str); } - virtual Result RedirectApplicationLegalInformationPathDeprecated(const Path &path, ncm::ProgramId id) override { - return lrLrRedirectApplicationLegalInformationPath(std::addressof(this->srv), id.value, 0, path.str); + Result RedirectApplicationLegalInformationPathDeprecated(const Path &path, ncm::ProgramId id) { + return ::lrLrRedirectApplicationLegalInformationPath(std::addressof(this->srv), id.value, 0, path.str); } - virtual Result RedirectApplicationLegalInformationPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { - return lrLrRedirectApplicationLegalInformationPath(std::addressof(this->srv), id.value, owner_id.value, path.str); + Result RedirectApplicationLegalInformationPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { + return ::lrLrRedirectApplicationLegalInformationPath(std::addressof(this->srv), id.value, owner_id.value, path.str); } - virtual Result Refresh() override { - return lrLrRefresh(std::addressof(this->srv)); + Result Refresh() { + return ::lrLrRefresh(std::addressof(this->srv)); } - virtual Result RedirectApplicationProgramPathDeprecated(const Path &path, ncm::ProgramId id) override { + Result RedirectApplicationProgramPathDeprecated(const Path &path, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectApplicationProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { + Result RedirectApplicationProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result ClearApplicationRedirectionDeprecated() override { + Result ClearApplicationRedirectionDeprecated() { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result ClearApplicationRedirection(const sf::InArray &excluding_ids) override { + Result ClearApplicationRedirection(const sf::InArray &excluding_ids) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result EraseProgramRedirection(ncm::ProgramId id) override { - return lrLrEraseProgramRedirection(std::addressof(this->srv), id.value); + Result EraseProgramRedirection(ncm::ProgramId id) { + return ::lrLrEraseProgramRedirection(std::addressof(this->srv), id.value); } - virtual Result EraseApplicationControlRedirection(ncm::ProgramId id) override { + Result EraseApplicationControlRedirection(ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result EraseApplicationHtmlDocumentRedirection(ncm::ProgramId id) override { + Result EraseApplicationHtmlDocumentRedirection(ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result EraseApplicationLegalInformationRedirection(ncm::ProgramId id) override { + Result EraseApplicationLegalInformationRedirection(ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result ResolveProgramPathForDebug(sf::Out out, ncm::ProgramId id) override { + Result ResolveProgramPathForDebug(sf::Out out, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectProgramPathForDebug(const Path &path, ncm::ProgramId id) override { + Result RedirectProgramPathForDebug(const Path &path, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectApplicationProgramPathForDebugDeprecated(const Path &path, ncm::ProgramId id) override { + Result 
RedirectApplicationProgramPathForDebugDeprecated(const Path &path, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectApplicationProgramPathForDebug(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { + Result RedirectApplicationProgramPathForDebug(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result EraseProgramRedirectionForDebug(ncm::ProgramId id) override { + Result EraseProgramRedirectionForDebug(ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } }; + static_assert(lr::IsILocationResolver); } diff --git a/libraries/libstratosphere/source/lr/lr_remote_registered_location_resolver_impl.hpp b/libraries/libstratosphere/source/lr/lr_remote_registered_location_resolver_impl.hpp index 3b8551756..2a3367d34 100644 --- a/libraries/libstratosphere/source/lr/lr_remote_registered_location_resolver_impl.hpp +++ b/libraries/libstratosphere/source/lr/lr_remote_registered_location_resolver_impl.hpp @@ -19,7 +19,7 @@ namespace ams::lr { - class RemoteRegisteredLocationResolverImpl : public IRegisteredLocationResolver { + class RemoteRegisteredLocationResolverImpl { private: ::LrRegisteredLocationResolver srv; public: @@ -28,74 +28,75 @@ namespace ams::lr { ~RemoteRegisteredLocationResolverImpl() { ::serviceClose(&srv.s); } public: /* Actual commands. */ - virtual Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) override { - return lrRegLrResolveProgramPath(std::addressof(this->srv), static_cast(id), out->str); + Result ResolveProgramPath(sf::Out out, ncm::ProgramId id) { + return ::lrRegLrResolveProgramPath(std::addressof(this->srv), static_cast(id), out->str); } - virtual Result RegisterProgramPathDeprecated(const Path &path, ncm::ProgramId id) override { + Result RegisterProgramPathDeprecated(const Path &path, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RegisterProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { + Result RegisterProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result UnregisterProgramPath(ncm::ProgramId id) override { + Result UnregisterProgramPath(ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectProgramPathDeprecated(const Path &path, ncm::ProgramId id) override { + Result RedirectProgramPathDeprecated(const Path &path, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { + Result RedirectProgramPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result ResolveHtmlDocumentPath(sf::Out out, ncm::ProgramId id) override { + Result ResolveHtmlDocumentPath(sf::Out out, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RegisterHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) override { + Result RegisterHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RegisterHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { + Result RegisterHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result UnregisterHtmlDocumentPath(ncm::ProgramId id) override 
{ + Result UnregisterHtmlDocumentPath(ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) override { + Result RedirectHtmlDocumentPathDeprecated(const Path &path, ncm::ProgramId id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RedirectHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) override { + Result RedirectHtmlDocumentPath(const Path &path, ncm::ProgramId id, ncm::ProgramId owner_id) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result Refresh() override { + Result Refresh() { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result RefreshExcluding(const sf::InArray &ids) override { + Result RefreshExcluding(const sf::InArray &ids) { /* TODO: libnx bindings */ AMS_ABORT(); } }; + static_assert(lr::IsIRegisteredLocationResolver); } diff --git a/libraries/libstratosphere/source/ncm/ncm_api.cpp b/libraries/libstratosphere/source/ncm/ncm_api.cpp index b2c3e2d9c..4432b76c1 100644 --- a/libraries/libstratosphere/source/ncm/ncm_api.cpp +++ b/libraries/libstratosphere/source/ncm/ncm_api.cpp @@ -27,7 +27,7 @@ namespace ams::ncm { void Initialize() { AMS_ASSERT(g_content_manager == nullptr); R_ABORT_UNLESS(ncmInitialize()); - g_content_manager = std::make_shared(); + g_content_manager = sf::MakeShared(); } void Finalize() { @@ -88,10 +88,14 @@ namespace ams::ncm { } Result ActivateContentMetaDatabase(StorageId storage_id) { + /* On < 2.0.0, this command doesn't exist, and databases are activated as needed on open. */ + R_SUCCEED_IF(hos::GetVersion() < hos::Version_2_0_0); return g_content_manager->ActivateContentMetaDatabase(storage_id); } Result InactivateContentMetaDatabase(StorageId storage_id) { + /* On < 2.0.0, this command doesn't exist. */ + R_SUCCEED_IF(hos::GetVersion() < hos::Version_2_0_0); return g_content_manager->InactivateContentMetaDatabase(storage_id); } diff --git a/libraries/libstratosphere/source/ncm/ncm_content_manager_impl.cpp b/libraries/libstratosphere/source/ncm/ncm_content_manager_impl.cpp index 2e1f9d2a9..7302acc19 100644 --- a/libraries/libstratosphere/source/ncm/ncm_content_manager_impl.cpp +++ b/libraries/libstratosphere/source/ncm/ncm_content_manager_impl.cpp @@ -275,7 +275,7 @@ namespace ams::ncm { } Result ContentManagerImpl::BuildContentMetaDatabase(StorageId storage_id) { - if (hos::GetVersion() <= hos::Version_4_0_0) { + if (hos::GetVersion() < hos::Version_5_0_0) { /* Temporarily activate the database. */ R_TRY(this->ActivateContentMetaDatabase(storage_id)); ON_SCOPE_EXIT { this->InactivateContentMetaDatabase(storage_id); }; @@ -553,23 +553,23 @@ namespace ams::ncm { if (storage_id == StorageId::GameCard) { /* Game card content storage is read only. */ - auto content_storage = std::make_shared(); - R_TRY(content_storage->Initialize(root->path, MakeFlatContentFilePath)); + auto content_storage = sf::MakeShared(); + R_TRY(content_storage->GetImpl().Initialize(root->path, MakeFlatContentFilePath)); root->content_storage = std::move(content_storage); } else { /* Create a content storage. */ - auto content_storage = std::make_shared(); + auto content_storage = sf::MakeShared(); /* Initialize content storage with an appropriate path function. 
*/ switch (storage_id) { case StorageId::BuiltInSystem: - R_TRY(content_storage->Initialize(root->path, MakeFlatContentFilePath, MakeFlatPlaceHolderFilePath, false, std::addressof(this->rights_id_cache))); + R_TRY(content_storage->GetImpl().Initialize(root->path, MakeFlatContentFilePath, MakeFlatPlaceHolderFilePath, false, std::addressof(this->rights_id_cache))); break; case StorageId::SdCard: - R_TRY(content_storage->Initialize(root->path, MakeSha256HierarchicalContentFilePath_ForFat16KCluster, MakeSha256HierarchicalPlaceHolderFilePath_ForFat16KCluster, true, std::addressof(this->rights_id_cache))); + R_TRY(content_storage->GetImpl().Initialize(root->path, MakeSha256HierarchicalContentFilePath_ForFat16KCluster, MakeSha256HierarchicalPlaceHolderFilePath_ForFat16KCluster, true, std::addressof(this->rights_id_cache))); break; default: - R_TRY(content_storage->Initialize(root->path, MakeSha256HierarchicalContentFilePath_ForFat16KCluster, MakeSha256HierarchicalPlaceHolderFilePath_ForFat16KCluster, false, std::addressof(this->rights_id_cache))); + R_TRY(content_storage->GetImpl().Initialize(root->path, MakeSha256HierarchicalContentFilePath_ForFat16KCluster, MakeSha256HierarchicalPlaceHolderFilePath_ForFat16KCluster, false, std::addressof(this->rights_id_cache))); break; } @@ -617,7 +617,7 @@ namespace ams::ncm { R_TRY(root->kvs->Initialize(root->max_content_metas, root->memory_resource)); /* Create an on memory content meta database for game cards. */ - root->content_meta_database = std::make_shared(std::addressof(*root->kvs)); + root->content_meta_database = sf::MakeShared(std::addressof(*root->kvs)); } else { /* Mount save data for this root. */ R_TRY(fs::MountSystemSaveData(root->mount_name, root->info.space_id, root->info.id)); @@ -630,7 +630,7 @@ namespace ams::ncm { R_TRY(root->kvs->Load()); /* Create the content meta database. */ - root->content_meta_database = std::make_shared(std::addressof(*root->kvs), root->mount_name); + root->content_meta_database = sf::MakeShared(std::addressof(*root->kvs), root->mount_name); mount_guard.Cancel(); } diff --git a/libraries/libstratosphere/source/ncm/ncm_content_meta_database_impl_base.hpp b/libraries/libstratosphere/source/ncm/ncm_content_meta_database_impl_base.hpp index 171180622..21945868c 100644 --- a/libraries/libstratosphere/source/ncm/ncm_content_meta_database_impl_base.hpp +++ b/libraries/libstratosphere/source/ncm/ncm_content_meta_database_impl_base.hpp @@ -18,7 +18,7 @@ namespace ams::ncm { - class ContentMetaDatabaseImplBase : public IContentMetaDatabase { + class ContentMetaDatabaseImplBase { NON_COPYABLE(ContentMetaDatabaseImplBase); NON_MOVEABLE(ContentMetaDatabaseImplBase); protected: @@ -52,6 +52,32 @@ namespace ams::ncm { R_TRY(this->GetContentMetaSize(out_size, key)); return this->kvs->GetValuePointer(reinterpret_cast(out_value_ptr), key); } + public: + /* Actual commands. 
*/ + virtual Result Set(const ContentMetaKey &key, sf::InBuffer value) = 0; + virtual Result Get(sf::Out out_size, const ContentMetaKey &key, sf::OutBuffer out_value) = 0; + virtual Result Remove(const ContentMetaKey &key) = 0; + virtual Result GetContentIdByType(sf::Out out_content_id, const ContentMetaKey &key, ContentType type) = 0; + virtual Result ListContentInfo(sf::Out out_entries_written, const sf::OutArray &out_info, const ContentMetaKey &key, s32 offset) = 0; + virtual Result List(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_info, ContentMetaType meta_type, ApplicationId application_id, u64 min, u64 max, ContentInstallType install_type) = 0; + virtual Result GetLatestContentMetaKey(sf::Out out_key, u64 id) = 0; + virtual Result ListApplication(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_keys, ContentMetaType meta_type) = 0; + virtual Result Has(sf::Out out, const ContentMetaKey &key) = 0; + virtual Result HasAll(sf::Out out, const sf::InArray &keys) = 0; + virtual Result GetSize(sf::Out out_size, const ContentMetaKey &key) = 0; + virtual Result GetRequiredSystemVersion(sf::Out out_version, const ContentMetaKey &key) = 0; + virtual Result GetPatchId(sf::Out out_patch_id, const ContentMetaKey &key) = 0; + virtual Result DisableForcibly() = 0; + virtual Result LookupOrphanContent(const sf::OutArray &out_orphaned, const sf::InArray &content_ids) = 0; + virtual Result Commit() = 0; + virtual Result HasContent(sf::Out out, const ContentMetaKey &key, const ContentId &content_id) = 0; + virtual Result ListContentMetaInfo(sf::Out out_entries_written, const sf::OutArray &out_meta_info, const ContentMetaKey &key, s32 offset) = 0; + virtual Result GetAttributes(sf::Out out_attributes, const ContentMetaKey &key) = 0; + virtual Result GetRequiredApplicationVersion(sf::Out out_version, const ContentMetaKey &key) = 0; + virtual Result GetContentIdByTypeAndIdOffset(sf::Out out_content_id, const ContentMetaKey &key, ContentType type, u8 id_offset) = 0; + virtual Result GetCount(sf::Out out_count) = 0; + virtual Result GetOwnerApplicationId(sf::Out out_id, const ContentMetaKey &key) = 0; }; + static_assert(ncm::IsIContentMetaDatabase); } diff --git a/libraries/libstratosphere/source/ncm/ncm_content_meta_utils.cpp b/libraries/libstratosphere/source/ncm/ncm_content_meta_utils.cpp index e4c77ad70..59639e1af 100644 --- a/libraries/libstratosphere/source/ncm/ncm_content_meta_utils.cpp +++ b/libraries/libstratosphere/source/ncm/ncm_content_meta_utils.cpp @@ -26,12 +26,18 @@ namespace ams::ncm { return impl::PathView(name).HasSuffix(".cnmt"); } + Result MountContentMetaByRemoteFileSystemProxy(const char *mount_name, const char *path) { + return fs::MountContent(mount_name, path, fs::ContentType_Meta); + } + + constinit MountContentMetaFunction g_mount_content_meta_func = MountContentMetaByRemoteFileSystemProxy; + } Result ReadContentMetaPath(AutoBuffer *out, const char *path) { /* Mount the content. */ auto mount_name = impl::CreateUniqueMountName(); - R_TRY(fs::MountContent(mount_name.str, path, fs::ContentType_Meta)); + R_TRY(g_mount_content_meta_func(mount_name.str, path)); ON_SCOPE_EXIT { fs::Unmount(mount_name.str); }; /* Open the root directory. 
*/ @@ -80,7 +86,7 @@ namespace ams::ncm { Result ReadVariationContentMetaInfoList(s32 *out_count, std::unique_ptr *out_meta_infos, const Path &path, FirmwareVariationId firmware_variation_id) { AutoBuffer meta; { - /* TODO: fs::ScopedAutoAbortDisabler aad; */ + fs::ScopedAutoAbortDisabler aad; R_TRY(ReadContentMetaPath(std::addressof(meta), path.str)); } @@ -88,8 +94,7 @@ namespace ams::ncm { PackagedContentMetaReader reader(meta.Get(), meta.GetSize()); /* Define a helper to output the base meta infos. */ - /* TODO: C++20 ALWAYS_INLINE_LAMBDA */ - const auto ReadMetaInfoListFromBase = [&]() -> Result { + const auto ReadMetaInfoListFromBase = [&] ALWAYS_INLINE_LAMBDA () -> Result { /* Output the base content meta info count. */ *out_count = reader.GetContentMetaCount(); @@ -113,22 +118,39 @@ namespace ams::ncm { SystemUpdateMetaExtendedDataReader extended_data_reader(reader.GetExtendedData(), reader.GetExtendedDataSize()); std::optional firmware_variation_index = std::nullopt; + /* NOTE: Atmosphere extension to support downgrading. */ + /* If all firmware variations refer to base, don't require the current variation be present. */ + bool force_refer_to_base = true; + /* Find the input firmware variation id. */ for (size_t i = 0; i < extended_data_reader.GetFirmwareVariationCount(); i++) { if (*extended_data_reader.GetFirmwareVariationId(i) == firmware_variation_id) { firmware_variation_index = i; break; + } else { + /* Check if the current variation refers to base. */ + const FirmwareVariationInfo *cur_variation_info = extended_data_reader.GetFirmwareVariationInfo(i); + const bool cur_refers_to_base = extended_data_reader.GetHeader()->version == 1 || cur_variation_info->refer_to_base; + + /* We force referral to base on unsupported variation only if all supported variations refer to base. */ + force_refer_to_base &= cur_refers_to_base; } } /* We couldn't find the input firmware variation id. */ - R_UNLESS(firmware_variation_index, ncm::ResultInvalidFirmwareVariation()); + if (!firmware_variation_index) { + /* Unless we can force a referral to base, the firmware isn't supported. */ + R_UNLESS(force_refer_to_base, ncm::ResultInvalidFirmwareVariation()); + + /* Force a referral to base. */ + return ReadMetaInfoListFromBase(); + } /* Obtain the variation info. */ const FirmwareVariationInfo *variation_info = extended_data_reader.GetFirmwareVariationInfo(*firmware_variation_index); - /* Refer to base if variation info says we should, or if unk is 1 (unk is usually 2, probably a version). */ - const bool refer_to_base = variation_info->refer_to_base || extended_data_reader.GetHeader()->unk == 1; + /* Refer to base if variation info says we should, or if version is 1. */ + const bool refer_to_base = extended_data_reader.GetHeader()->version == 1 || variation_info->refer_to_base; R_UNLESS(!refer_to_base, ReadMetaInfoListFromBase()); /* Output the content meta count. 
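The force_refer_to_base extension above is what makes downgrades workable: when a package does not list the running firmware variation, installation is allowed only if every variation it does list already refers back to the base content meta. A compact standalone restatement of that selection rule, using illustrative types rather than the real SystemUpdateMetaExtendedDataReader:

#include <cstddef>
#include <cstdint>
#include <vector>

struct VariationInfo {
    uint32_t firmware_variation_id;
    bool refer_to_base;
};

enum class Selection { UseVariation, UseBase, Unsupported };

Selection SelectVariation(const std::vector<VariationInfo> &variations, uint32_t current_id, size_t *out_index) {
    bool all_refer_to_base = true;
    for (size_t i = 0; i < variations.size(); ++i) {
        if (variations[i].firmware_variation_id == current_id) {
            /* Our firmware is listed: honor its own refer_to_base flag. */
            *out_index = i;
            return variations[i].refer_to_base ? Selection::UseBase : Selection::UseVariation;
        }
        all_refer_to_base &= variations[i].refer_to_base;
    }
    /* Our firmware is not listed: only safe when every listed variation
       already refers to base (the downgrade extension above). */
    return all_refer_to_base ? Selection::UseBase : Selection::Unsupported;
}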
*/ @@ -156,4 +178,8 @@ namespace ams::ncm { return ResultSuccess(); } + void SetMountContentMetaFunction(MountContentMetaFunction func) { + g_mount_content_meta_func = func; + } + } diff --git a/libraries/libstratosphere/source/ncm/ncm_content_storage_impl_base.hpp b/libraries/libstratosphere/source/ncm/ncm_content_storage_impl_base.hpp index 7991eeb2b..3c094dba8 100644 --- a/libraries/libstratosphere/source/ncm/ncm_content_storage_impl_base.hpp +++ b/libraries/libstratosphere/source/ncm/ncm_content_storage_impl_base.hpp @@ -18,7 +18,7 @@ namespace ams::ncm { - class ContentStorageImplBase : public IContentStorage { + class ContentStorageImplBase { NON_COPYABLE(ContentStorageImplBase); NON_MOVEABLE(ContentStorageImplBase); protected: @@ -43,6 +43,39 @@ namespace ams::ncm { } return ResultSuccess(); } + public: + /* Actual commands. */ + virtual Result GeneratePlaceHolderId(sf::Out out) = 0; + virtual Result CreatePlaceHolder(PlaceHolderId placeholder_id, ContentId content_id, s64 size) = 0; + virtual Result DeletePlaceHolder(PlaceHolderId placeholder_id) = 0; + virtual Result HasPlaceHolder(sf::Out out, PlaceHolderId placeholder_id) = 0; + virtual Result WritePlaceHolder(PlaceHolderId placeholder_id, s64 offset, sf::InBuffer data) = 0; + virtual Result Register(PlaceHolderId placeholder_id, ContentId content_id) = 0; + virtual Result Delete(ContentId content_id) = 0; + virtual Result Has(sf::Out out, ContentId content_id) = 0; + virtual Result GetPath(sf::Out out, ContentId content_id) = 0; + virtual Result GetPlaceHolderPath(sf::Out out, PlaceHolderId placeholder_id) = 0; + virtual Result CleanupAllPlaceHolder() = 0; + virtual Result ListPlaceHolder(sf::Out out_count, const sf::OutArray &out_buf) = 0; + virtual Result GetContentCount(sf::Out out_count) = 0; + virtual Result ListContentId(sf::Out out_count, const sf::OutArray &out_buf, s32 start_offset) = 0; + virtual Result GetSizeFromContentId(sf::Out out_size, ContentId content_id) = 0; + virtual Result DisableForcibly() = 0; + virtual Result RevertToPlaceHolder(PlaceHolderId placeholder_id, ContentId old_content_id, ContentId new_content_id) = 0; + virtual Result SetPlaceHolderSize(PlaceHolderId placeholder_id, s64 size) = 0; + virtual Result ReadContentIdFile(sf::OutBuffer buf, ContentId content_id, s64 offset) = 0; + virtual Result GetRightsIdFromPlaceHolderIdDeprecated(sf::Out out_rights_id, PlaceHolderId placeholder_id) = 0; + virtual Result GetRightsIdFromPlaceHolderId(sf::Out out_rights_id, PlaceHolderId placeholder_id) = 0; + virtual Result GetRightsIdFromContentIdDeprecated(sf::Out out_rights_id, ContentId content_id) = 0; + virtual Result GetRightsIdFromContentId(sf::Out out_rights_id, ContentId content_id) = 0; + virtual Result WriteContentForDebug(ContentId content_id, s64 offset, sf::InBuffer data) = 0; + virtual Result GetFreeSpaceSize(sf::Out out_size) = 0; + virtual Result GetTotalSpaceSize(sf::Out out_size) = 0; + virtual Result FlushPlaceHolder() = 0; + virtual Result GetSizeFromPlaceHolderId(sf::Out out, PlaceHolderId placeholder_id) = 0; + virtual Result RepairInvalidFileAttribute() = 0; + virtual Result GetRightsIdFromPlaceHolderIdWithCache(sf::Out out_rights_id, PlaceHolderId placeholder_id, ContentId cache_content_id) = 0; }; + static_assert(ncm::IsIContentStorage); } diff --git a/libraries/libstratosphere/source/ncm/ncm_install_task_base.cpp b/libraries/libstratosphere/source/ncm/ncm_install_task_base.cpp index 506e89fac..e7e24e4ec 100644 --- a/libraries/libstratosphere/source/ncm/ncm_install_task_base.cpp +++ 
b/libraries/libstratosphere/source/ncm/ncm_install_task_base.cpp @@ -593,7 +593,7 @@ namespace ams::ncm { R_UNLESS(std::memcmp(hash, content_info->digest.data, crypto::Sha256Generator::HashSize) == 0, ncm::ResultInvalidContentHash()); } - if (!(this->config & InstallConfig_IgnoreTicket)) { + if (hos::GetVersion() >= hos::Version_2_0_0 && !(this->config & InstallConfig_IgnoreTicket)) { ncm::RightsId rights_id; { /* Open the content storage and obtain the rights id. */ @@ -1000,7 +1000,7 @@ namespace ams::ncm { Result InstallTaskBase::GetInstallContentMetaDataFromPath(AutoBuffer *out, const Path &path, const InstallContentInfo &content_info, std::optional source_version) { AutoBuffer meta; { - /* TODO: fs::ScopedAutoAbortDisabler aad; */ + fs::ScopedAutoAbortDisabler aad; R_TRY(ReadContentMetaPath(std::addressof(meta), path.str)); } diff --git a/libraries/libstratosphere/source/ncm/ncm_package_system_update_task.cpp b/libraries/libstratosphere/source/ncm/ncm_package_system_update_task.cpp index 881427225..9e428aa42 100644 --- a/libraries/libstratosphere/source/ncm/ncm_package_system_update_task.cpp +++ b/libraries/libstratosphere/source/ncm/ncm_package_system_update_task.cpp @@ -58,7 +58,7 @@ namespace ams::ncm { R_TRY(this->data.Initialize(context_path)); /* Initialize PackageInstallTaskBase. */ - u32 config = requires_exfat_driver ? InstallConfig_SystemUpdate : InstallConfig_SystemUpdate | InstallConfig_RequiresExFatDriver; + u32 config = !requires_exfat_driver ? InstallConfig_SystemUpdate : InstallConfig_SystemUpdate | InstallConfig_RequiresExFatDriver; R_TRY(PackageInstallTaskBase::Initialize(package_root, buffer, buffer_size, StorageId::BuiltInSystem, std::addressof(this->data), config)); /* Cancel guards. */ diff --git a/libraries/libstratosphere/source/ncm/ncm_remote_content_manager_impl.hpp b/libraries/libstratosphere/source/ncm/ncm_remote_content_manager_impl.hpp index 5ff6ca8d5..59577f7ae 100644 --- a/libraries/libstratosphere/source/ncm/ncm_remote_content_manager_impl.hpp +++ b/libraries/libstratosphere/source/ncm/ncm_remote_content_manager_impl.hpp @@ -20,80 +20,81 @@ namespace ams::ncm { - class RemoteContentManagerImpl final : public IContentManager { + class RemoteContentManagerImpl final { public: RemoteContentManagerImpl() { /* ... */ } ~RemoteContentManagerImpl() { /* ... 
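The ncm_package_system_update_task change above is a one-character logic fix: the exFAT-driver flag must be OR'd into the install config precisely when the update does require the driver, and the old ternary had that condition inverted. Written as plain flag composition, the intent is easier to audit (the flag values here are placeholders, not the real enum):

#include <cstdint>

enum InstallConfig : uint32_t {
    InstallConfig_SystemUpdate        = 1u << 2,
    InstallConfig_RequiresExFatDriver = 1u << 3,
};

uint32_t MakeSystemUpdateConfig(bool requires_exfat_driver) {
    uint32_t config = InstallConfig_SystemUpdate;
    if (requires_exfat_driver) {
        config |= InstallConfig_RequiresExFatDriver;
    }
    return config;
}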
*/ } public: - virtual Result CreateContentStorage(StorageId storage_id) override { + Result CreateContentStorage(StorageId storage_id) { return ::ncmCreateContentStorage(static_cast(storage_id)); } - virtual Result CreateContentMetaDatabase(StorageId storage_id) override { + Result CreateContentMetaDatabase(StorageId storage_id) { return ::ncmCreateContentMetaDatabase(static_cast(storage_id)); } - virtual Result VerifyContentStorage(StorageId storage_id) override { + Result VerifyContentStorage(StorageId storage_id) { return ::ncmVerifyContentStorage(static_cast(storage_id)); } - virtual Result VerifyContentMetaDatabase(StorageId storage_id) override { + Result VerifyContentMetaDatabase(StorageId storage_id) { return ::ncmVerifyContentMetaDatabase(static_cast(storage_id)); } - virtual Result OpenContentStorage(sf::Out> out, StorageId storage_id) override { + Result OpenContentStorage(sf::Out> out, StorageId storage_id) { NcmContentStorage cs; R_TRY(::ncmOpenContentStorage(std::addressof(cs), static_cast(storage_id))); - out.SetValue(std::make_shared(cs)); + out.SetValue(sf::MakeShared(cs)); return ResultSuccess(); } - virtual Result OpenContentMetaDatabase(sf::Out> out, StorageId storage_id) override { + Result OpenContentMetaDatabase(sf::Out> out, StorageId storage_id) { NcmContentMetaDatabase db; R_TRY(::ncmOpenContentMetaDatabase(std::addressof(db), static_cast(storage_id))); - out.SetValue(std::make_shared(db)); + out.SetValue(sf::MakeShared(db)); return ResultSuccess(); } - virtual Result CloseContentStorageForcibly(StorageId storage_id) override { + Result CloseContentStorageForcibly(StorageId storage_id) { return ::ncmCloseContentStorageForcibly(static_cast(storage_id)); } - virtual Result CloseContentMetaDatabaseForcibly(StorageId storage_id) override { + Result CloseContentMetaDatabaseForcibly(StorageId storage_id) { return ::ncmCloseContentMetaDatabaseForcibly(static_cast(storage_id)); } - virtual Result CleanupContentMetaDatabase(StorageId storage_id) override { + Result CleanupContentMetaDatabase(StorageId storage_id) { return ::ncmCleanupContentMetaDatabase(static_cast(storage_id)); } - virtual Result ActivateContentStorage(StorageId storage_id) override { + Result ActivateContentStorage(StorageId storage_id) { return ::ncmActivateContentStorage(static_cast(storage_id)); } - virtual Result InactivateContentStorage(StorageId storage_id) override { + Result InactivateContentStorage(StorageId storage_id) { return ::ncmInactivateContentStorage(static_cast(storage_id)); } - virtual Result ActivateContentMetaDatabase(StorageId storage_id) override { + Result ActivateContentMetaDatabase(StorageId storage_id) { return ::ncmActivateContentMetaDatabase(static_cast(storage_id)); } - virtual Result InactivateContentMetaDatabase(StorageId storage_id) override { + Result InactivateContentMetaDatabase(StorageId storage_id) { return ::ncmInactivateContentMetaDatabase(static_cast(storage_id)); } - virtual Result InvalidateRightsIdCache() override { + Result InvalidateRightsIdCache() { return ::ncmInvalidateRightsIdCache(); } - virtual Result GetMemoryReport(sf::Out out) override { + Result GetMemoryReport(sf::Out out) { /* TODO: libnx bindings */ AMS_ABORT(); } }; + static_assert(ncm::IsIContentManager); } diff --git a/libraries/libstratosphere/source/ncm/ncm_remote_content_meta_database_impl.hpp b/libraries/libstratosphere/source/ncm/ncm_remote_content_meta_database_impl.hpp index 98fc25068..14935f465 100644 --- a/libraries/libstratosphere/source/ncm/ncm_remote_content_meta_database_impl.hpp 
+++ b/libraries/libstratosphere/source/ncm/ncm_remote_content_meta_database_impl.hpp @@ -18,7 +18,7 @@ namespace ams::ncm { - class RemoteContentMetaDatabaseImpl final : public IContentMetaDatabase { + class RemoteContentMetaDatabaseImpl final { private: ::NcmContentMetaDatabase srv; public: @@ -71,101 +71,101 @@ namespace ams::ncm { return reinterpret_cast(std::addressof(c)); } public: - virtual Result Set(const ContentMetaKey &key, sf::InBuffer value) override { + Result Set(const ContentMetaKey &key, sf::InBuffer value) { return ncmContentMetaDatabaseSet(std::addressof(this->srv), Convert(key), value.GetPointer(), value.GetSize()); } - virtual Result Get(sf::Out out_size, const ContentMetaKey &key, sf::OutBuffer out_value) override { + Result Get(sf::Out out_size, const ContentMetaKey &key, sf::OutBuffer out_value) { return ncmContentMetaDatabaseGet(std::addressof(this->srv), Convert(key), out_size.GetPointer(), out_value.GetPointer(), out_value.GetSize()); } - virtual Result Remove(const ContentMetaKey &key) override { + Result Remove(const ContentMetaKey &key) { return ncmContentMetaDatabaseRemove(std::addressof(this->srv), Convert(key)); } - virtual Result GetContentIdByType(sf::Out out_content_id, const ContentMetaKey &key, ContentType type) override { + Result GetContentIdByType(sf::Out out_content_id, const ContentMetaKey &key, ContentType type) { return ncmContentMetaDatabaseGetContentIdByType(std::addressof(this->srv), Convert(out_content_id.GetPointer()), Convert(key), static_cast<::NcmContentType>(type)); } - virtual Result ListContentInfo(sf::Out out_entries_written, const sf::OutArray &out_info, const ContentMetaKey &key, s32 offset) override { + Result ListContentInfo(sf::Out out_entries_written, const sf::OutArray &out_info, const ContentMetaKey &key, s32 offset) { return ncmContentMetaDatabaseListContentInfo(std::addressof(this->srv), out_entries_written.GetPointer(), Convert(out_info.GetPointer()), out_info.GetSize(), Convert(key), offset); } - virtual Result List(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_info, ContentMetaType meta_type, ApplicationId application_id, u64 min, u64 max, ContentInstallType install_type) override { + Result List(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_info, ContentMetaType meta_type, ApplicationId application_id, u64 min, u64 max, ContentInstallType install_type) { return ncmContentMetaDatabaseList(std::addressof(this->srv), out_entries_total.GetPointer(), out_entries_written.GetPointer(), Convert(out_info.GetPointer()), out_info.GetSize(), static_cast<::NcmContentMetaType>(meta_type), application_id.value, min, max, static_cast<::NcmContentInstallType>(install_type)); } - virtual Result GetLatestContentMetaKey(sf::Out out_key, u64 id) override { + Result GetLatestContentMetaKey(sf::Out out_key, u64 id) { return ncmContentMetaDatabaseGetLatestContentMetaKey(std::addressof(this->srv), Convert(out_key.GetPointer()), static_cast(id)); } - virtual Result ListApplication(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_keys, ContentMetaType meta_type) override { + Result ListApplication(sf::Out out_entries_total, sf::Out out_entries_written, const sf::OutArray &out_keys, ContentMetaType meta_type) { return ncmContentMetaDatabaseListApplication(std::addressof(this->srv), out_entries_total.GetPointer(), out_entries_written.GetPointer(), Convert(out_keys.GetPointer()), out_keys.GetSize(), static_cast<::NcmContentMetaType>(meta_type)); } - 
virtual Result Has(sf::Out out, const ContentMetaKey &key) override { + Result Has(sf::Out out, const ContentMetaKey &key) { return ncmContentMetaDatabaseHas(std::addressof(this->srv), out.GetPointer(), Convert(key)); } - virtual Result HasAll(sf::Out out, const sf::InArray &keys) override { + Result HasAll(sf::Out out, const sf::InArray &keys) { return ncmContentMetaDatabaseHasAll(std::addressof(this->srv), out.GetPointer(), Convert(keys.GetPointer()), keys.GetSize()); } - virtual Result GetSize(sf::Out out_size, const ContentMetaKey &key) override { + Result GetSize(sf::Out out_size, const ContentMetaKey &key) { return ncmContentMetaDatabaseGetSize(std::addressof(this->srv), out_size.GetPointer(), Convert(key)); } - virtual Result GetRequiredSystemVersion(sf::Out out_version, const ContentMetaKey &key) override { + Result GetRequiredSystemVersion(sf::Out out_version, const ContentMetaKey &key) { return ncmContentMetaDatabaseGetRequiredSystemVersion(std::addressof(this->srv), out_version.GetPointer(), Convert(key)); } - virtual Result GetPatchId(sf::Out out_patch_id, const ContentMetaKey &key) override { + Result GetPatchId(sf::Out out_patch_id, const ContentMetaKey &key) { return ncmContentMetaDatabaseGetPatchId(std::addressof(this->srv), reinterpret_cast(out_patch_id.GetPointer()), Convert(key)); } - virtual Result DisableForcibly() override { + Result DisableForcibly() { return ncmContentMetaDatabaseDisableForcibly(std::addressof(this->srv)); } - virtual Result LookupOrphanContent(const sf::OutArray &out_orphaned, const sf::InArray &content_ids) override { + Result LookupOrphanContent(const sf::OutArray &out_orphaned, const sf::InArray &content_ids) { return ncmContentMetaDatabaseLookupOrphanContent(std::addressof(this->srv), out_orphaned.GetPointer(), Convert(content_ids.GetPointer()), std::min(out_orphaned.GetSize(), content_ids.GetSize())); } - virtual Result Commit() override { + Result Commit() { return ncmContentMetaDatabaseCommit(std::addressof(this->srv)); } - virtual Result HasContent(sf::Out out, const ContentMetaKey &key, const ContentId &content_id) override { + Result HasContent(sf::Out out, const ContentMetaKey &key, const ContentId &content_id) { return ncmContentMetaDatabaseHasContent(std::addressof(this->srv), out.GetPointer(), Convert(key), Convert(content_id)); } - virtual Result ListContentMetaInfo(sf::Out out_entries_written, const sf::OutArray &out_meta_info, const ContentMetaKey &key, s32 offset) override { + Result ListContentMetaInfo(sf::Out out_entries_written, const sf::OutArray &out_meta_info, const ContentMetaKey &key, s32 offset) { return ncmContentMetaDatabaseListContentMetaInfo(std::addressof(this->srv), out_entries_written.GetPointer(), out_meta_info.GetPointer(), out_meta_info.GetSize(), Convert(key), offset); } - virtual Result GetAttributes(sf::Out out_attributes, const ContentMetaKey &key) override { + Result GetAttributes(sf::Out out_attributes, const ContentMetaKey &key) { static_assert(sizeof(ContentMetaAttribute) == sizeof(u8)); return ncmContentMetaDatabaseGetAttributes(std::addressof(this->srv), Convert(key), out_attributes.GetPointer()); } - virtual Result GetRequiredApplicationVersion(sf::Out out_version, const ContentMetaKey &key) override { + Result GetRequiredApplicationVersion(sf::Out out_version, const ContentMetaKey &key) { return ncmContentMetaDatabaseGetRequiredApplicationVersion(std::addressof(this->srv), out_version.GetPointer(), Convert(key)); } - virtual Result GetContentIdByTypeAndIdOffset(sf::Out out_content_id, const 
ContentMetaKey &key, ContentType type, u8 id_offset) override { + Result GetContentIdByTypeAndIdOffset(sf::Out out_content_id, const ContentMetaKey &key, ContentType type, u8 id_offset) { return ncmContentMetaDatabaseGetContentIdByTypeAndIdOffset(std::addressof(this->srv), Convert(out_content_id.GetPointer()), Convert(key), static_cast<::NcmContentType>(type), id_offset); } - virtual Result GetCount(sf::Out out_count) override { + Result GetCount(sf::Out out_count) { /* TODO: libnx bindings */ AMS_ABORT(); } - virtual Result GetOwnerApplicationId(sf::Out out_id, const ContentMetaKey &key) override { + Result GetOwnerApplicationId(sf::Out out_id, const ContentMetaKey &key) { /* TODO: libnx bindings */ AMS_ABORT(); } - }; + static_assert(ncm::IsIContentMetaDatabase); } diff --git a/libraries/libstratosphere/source/ncm/ncm_remote_content_storage_impl.hpp b/libraries/libstratosphere/source/ncm/ncm_remote_content_storage_impl.hpp index a7db1d4d9..32465f563 100644 --- a/libraries/libstratosphere/source/ncm/ncm_remote_content_storage_impl.hpp +++ b/libraries/libstratosphere/source/ncm/ncm_remote_content_storage_impl.hpp @@ -18,7 +18,7 @@ namespace ams::ncm { - class RemoteContentStorageImpl final : public IContentStorage { + class RemoteContentStorageImpl final { private: ::NcmContentStorage srv; public: @@ -46,85 +46,85 @@ namespace ams::ncm { return reinterpret_cast<::NcmContentId *>(std::addressof(c)); } public: - virtual Result GeneratePlaceHolderId(sf::Out out) override { + Result GeneratePlaceHolderId(sf::Out out) { return ncmContentStorageGeneratePlaceHolderId(std::addressof(this->srv), Convert(out.GetPointer())); } - virtual Result CreatePlaceHolder(PlaceHolderId placeholder_id, ContentId content_id, s64 size) override { + Result CreatePlaceHolder(PlaceHolderId placeholder_id, ContentId content_id, s64 size) { static_assert(alignof(ContentId) < alignof(PlaceHolderId)); return ncmContentStorageCreatePlaceHolder(std::addressof(this->srv), Convert(content_id), Convert(placeholder_id), size); } - virtual Result DeletePlaceHolder(PlaceHolderId placeholder_id) override { + Result DeletePlaceHolder(PlaceHolderId placeholder_id) { return ncmContentStorageDeletePlaceHolder(std::addressof(this->srv), Convert(placeholder_id)); } - virtual Result HasPlaceHolder(sf::Out out, PlaceHolderId placeholder_id) override { + Result HasPlaceHolder(sf::Out out, PlaceHolderId placeholder_id) { return ncmContentStorageHasPlaceHolder(std::addressof(this->srv), out.GetPointer(), Convert(placeholder_id)); } - virtual Result WritePlaceHolder(PlaceHolderId placeholder_id, s64 offset, sf::InBuffer data) override { + Result WritePlaceHolder(PlaceHolderId placeholder_id, s64 offset, sf::InBuffer data) { return ncmContentStorageWritePlaceHolder(std::addressof(this->srv), Convert(placeholder_id), offset, data.GetPointer(), data.GetSize()); } - virtual Result Register(PlaceHolderId placeholder_id, ContentId content_id) override { + Result Register(PlaceHolderId placeholder_id, ContentId content_id) { static_assert(alignof(ContentId) < alignof(PlaceHolderId)); return ncmContentStorageRegister(std::addressof(this->srv), Convert(content_id), Convert(placeholder_id)); } - virtual Result Delete(ContentId content_id) override { + Result Delete(ContentId content_id) { return ncmContentStorageDelete(std::addressof(this->srv), Convert(content_id)); } - virtual Result Has(sf::Out out, ContentId content_id) override { + Result Has(sf::Out out, ContentId content_id) { return ncmContentStorageHas(std::addressof(this->srv), 
out.GetPointer(), Convert(content_id)); } - virtual Result GetPath(sf::Out out, ContentId content_id) override { + Result GetPath(sf::Out out, ContentId content_id) { return ncmContentStorageGetPath(std::addressof(this->srv), out.GetPointer()->str, sizeof(out.GetPointer()->str), Convert(content_id)); } - virtual Result GetPlaceHolderPath(sf::Out out, PlaceHolderId placeholder_id) override { + Result GetPlaceHolderPath(sf::Out out, PlaceHolderId placeholder_id) { return ncmContentStorageGetPlaceHolderPath(std::addressof(this->srv), out.GetPointer()->str, sizeof(out.GetPointer()->str), Convert(placeholder_id)); } - virtual Result CleanupAllPlaceHolder() override { + Result CleanupAllPlaceHolder() { return ncmContentStorageCleanupAllPlaceHolder(std::addressof(this->srv)); } - virtual Result ListPlaceHolder(sf::Out out_count, const sf::OutArray &out_buf) override { + Result ListPlaceHolder(sf::Out out_count, const sf::OutArray &out_buf) { return ncmContentStorageListPlaceHolder(std::addressof(this->srv), Convert(out_buf.GetPointer()), out_buf.GetSize(), out_count.GetPointer()); } - virtual Result GetContentCount(sf::Out out_count) override { + Result GetContentCount(sf::Out out_count) { return ncmContentStorageGetContentCount(std::addressof(this->srv), out_count.GetPointer()); } - virtual Result ListContentId(sf::Out out_count, const sf::OutArray &out_buf, s32 offset) override { + Result ListContentId(sf::Out out_count, const sf::OutArray &out_buf, s32 offset) { return ncmContentStorageListContentId(std::addressof(this->srv), Convert(out_buf.GetPointer()), out_buf.GetSize(), out_count.GetPointer(), offset); } - virtual Result GetSizeFromContentId(sf::Out out_size, ContentId content_id) override { + Result GetSizeFromContentId(sf::Out out_size, ContentId content_id) { return ncmContentStorageGetSizeFromContentId(std::addressof(this->srv), out_size.GetPointer(), Convert(content_id)); } - virtual Result DisableForcibly() override { + Result DisableForcibly() { return ncmContentStorageDisableForcibly(std::addressof(this->srv)); } - virtual Result RevertToPlaceHolder(PlaceHolderId placeholder_id, ContentId old_content_id, ContentId new_content_id) override { + Result RevertToPlaceHolder(PlaceHolderId placeholder_id, ContentId old_content_id, ContentId new_content_id) { return ncmContentStorageRevertToPlaceHolder(std::addressof(this->srv), Convert(placeholder_id), Convert(old_content_id), Convert(new_content_id)); } - virtual Result SetPlaceHolderSize(PlaceHolderId placeholder_id, s64 size) override { + Result SetPlaceHolderSize(PlaceHolderId placeholder_id, s64 size) { return ncmContentStorageSetPlaceHolderSize(std::addressof(this->srv), Convert(placeholder_id), size); } - virtual Result ReadContentIdFile(sf::OutBuffer buf, ContentId content_id, s64 offset) override { + Result ReadContentIdFile(sf::OutBuffer buf, ContentId content_id, s64 offset) { return ncmContentStorageReadContentIdFile(std::addressof(this->srv), buf.GetPointer(), buf.GetSize(), Convert(content_id), offset); } - virtual Result GetRightsIdFromPlaceHolderIdDeprecated(sf::Out out_rights_id, PlaceHolderId placeholder_id) override { + Result GetRightsIdFromPlaceHolderIdDeprecated(sf::Out out_rights_id, PlaceHolderId placeholder_id) { ::NcmRightsId rights_id; R_TRY(ncmContentStorageGetRightsIdFromPlaceHolderId(std::addressof(this->srv), std::addressof(rights_id), Convert(placeholder_id))); @@ -133,7 +133,7 @@ namespace ams::ncm { return ResultSuccess(); } - virtual Result GetRightsIdFromPlaceHolderId(sf::Out out_rights_id, 
PlaceHolderId placeholder_id) override { + Result GetRightsIdFromPlaceHolderId(sf::Out out_rights_id, PlaceHolderId placeholder_id) { ::NcmRightsId rights_id; R_TRY(ncmContentStorageGetRightsIdFromPlaceHolderId(std::addressof(this->srv), std::addressof(rights_id), Convert(placeholder_id))); @@ -142,7 +142,7 @@ namespace ams::ncm { return ResultSuccess(); } - virtual Result GetRightsIdFromContentIdDeprecated(sf::Out out_rights_id, ContentId content_id) override { + Result GetRightsIdFromContentIdDeprecated(sf::Out out_rights_id, ContentId content_id) { ::NcmRightsId rights_id; R_TRY(ncmContentStorageGetRightsIdFromContentId(std::addressof(this->srv), std::addressof(rights_id), Convert(content_id))); @@ -151,7 +151,7 @@ namespace ams::ncm { return ResultSuccess(); } - virtual Result GetRightsIdFromContentId(sf::Out out_rights_id, ContentId content_id) override { + Result GetRightsIdFromContentId(sf::Out out_rights_id, ContentId content_id) { ::NcmRightsId rights_id; R_TRY(ncmContentStorageGetRightsIdFromContentId(std::addressof(this->srv), std::addressof(rights_id), Convert(content_id))); @@ -160,35 +160,36 @@ namespace ams::ncm { return ResultSuccess(); } - virtual Result WriteContentForDebug(ContentId content_id, s64 offset, sf::InBuffer data) override { + Result WriteContentForDebug(ContentId content_id, s64 offset, sf::InBuffer data) { return ncmContentStorageWriteContentForDebug(std::addressof(this->srv), Convert(content_id), offset, data.GetPointer(), data.GetSize()); } - virtual Result GetFreeSpaceSize(sf::Out out_size) override { + Result GetFreeSpaceSize(sf::Out out_size) { return ncmContentStorageGetFreeSpaceSize(std::addressof(this->srv), out_size.GetPointer()); } - virtual Result GetTotalSpaceSize(sf::Out out_size) override { + Result GetTotalSpaceSize(sf::Out out_size) { return ncmContentStorageGetTotalSpaceSize(std::addressof(this->srv), out_size.GetPointer()); } - virtual Result FlushPlaceHolder() override { + Result FlushPlaceHolder() { return ncmContentStorageFlushPlaceHolder(std::addressof(this->srv)); } - virtual Result GetSizeFromPlaceHolderId(sf::Out out_size, PlaceHolderId placeholder_id) override { + Result GetSizeFromPlaceHolderId(sf::Out out_size, PlaceHolderId placeholder_id) { return ncmContentStorageGetSizeFromPlaceHolderId(std::addressof(this->srv), out_size.GetPointer(), Convert(placeholder_id)); } - virtual Result RepairInvalidFileAttribute() override { + Result RepairInvalidFileAttribute() { return ncmContentStorageRepairInvalidFileAttribute(std::addressof(this->srv)); } - virtual Result GetRightsIdFromPlaceHolderIdWithCache(sf::Out out_rights_id, PlaceHolderId placeholder_id, ContentId cache_content_id) override { + Result GetRightsIdFromPlaceHolderIdWithCache(sf::Out out_rights_id, PlaceHolderId placeholder_id, ContentId cache_content_id) { static_assert(sizeof(::NcmRightsId) == sizeof(ncm::RightsId)); ::NcmRightsId *out = reinterpret_cast<::NcmRightsId *>(out_rights_id.GetPointer()); return ncmContentStorageGetRightsIdFromPlaceHolderIdWithCache(std::addressof(this->srv), out, Convert(placeholder_id), Convert(cache_content_id)); } }; + static_assert(ncm::IsIContentStorage); } diff --git a/libraries/libstratosphere/source/os/impl/os_thread_manager.cpp b/libraries/libstratosphere/source/os/impl/os_thread_manager.cpp index 8fee951ae..d58c4c321 100644 --- a/libraries/libstratosphere/source/os/impl/os_thread_manager.cpp +++ b/libraries/libstratosphere/source/os/impl/os_thread_manager.cpp @@ -41,6 +41,9 @@ namespace ams::os::impl { thread->name_buffer[0] = '\x00'; 
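The Remote* wrappers above forward each command straight to its libnx binding, casting libstratosphere types to their C equivalents; each such cast sits behind a static_assert that the two layouts actually match (for example sizeof(::NcmRightsId) == sizeof(ncm::RightsId)), and a final static_assert confirms the de-virtualized class still satisfies the interface concept. The guard pattern in isolation, with types invented for the example:

#include <cstdint>

/* The C type as a binding library might declare it. */
struct CRightsId { uint8_t c[0x10]; };

/* The strongly-typed C++ equivalent, intended to be layout-identical. */
struct RightsId { uint8_t data[0x10]; };

/* Prove the reinterpret_cast is sound before ever performing it. */
static_assert(sizeof(RightsId) == sizeof(CRightsId));
static_assert(alignof(RightsId) == alignof(CRightsId));

inline CRightsId *AsC(RightsId *id) {
    return reinterpret_cast<CRightsId *>(id);
}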
thread->name_pointer = thread->name_buffer; + /* Set internal tls variables. */ + thread->atomic_sf_inline_context = 0; + /* Mark initialized. */ thread->state = ThreadType::State_Initialized; } diff --git a/libraries/libstratosphere/source/os/impl/os_transfer_memory_impl.hpp b/libraries/libstratosphere/source/os/impl/os_transfer_memory_impl.hpp new file mode 100644 index 000000000..39d18a5a6 --- /dev/null +++ b/libraries/libstratosphere/source/os/impl/os_transfer_memory_impl.hpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::os::impl { + + class TransferMemoryImpl { + public: + static Result Create(Handle *out, void *address, size_t size, MemoryPermission perm); + static void Close(Handle handle); + + static Result Map(void **out, Handle handle, void *address, size_t size, MemoryPermission owner_perm); + static void Unmap(Handle handle, void *address, size_t size); + }; + +} \ No newline at end of file diff --git a/libraries/libstratosphere/source/os/impl/os_transfer_memory_impl.os.horizon.cpp b/libraries/libstratosphere/source/os/impl/os_transfer_memory_impl.os.horizon.cpp new file mode 100644 index 000000000..d67a87b79 --- /dev/null +++ b/libraries/libstratosphere/source/os/impl/os_transfer_memory_impl.os.horizon.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "os_transfer_memory_impl.hpp" + +namespace ams::os::impl { + + namespace { + + svc::MemoryPermission ConvertToSvcMemoryPermission(os::MemoryPermission perm) { + switch (perm) { + case os::MemoryPermission_None: return svc::MemoryPermission_None; + case os::MemoryPermission_ReadOnly: return svc::MemoryPermission_Read; + case os::MemoryPermission_WriteOnly: return svc::MemoryPermission_Write; + case os::MemoryPermission_ReadWrite: return svc::MemoryPermission_ReadWrite; + AMS_UNREACHABLE_DEFAULT_CASE(); + } + } + + } + + Result TransferMemoryImpl::Create(Handle *out, void *address, size_t size, MemoryPermission perm) { + /* Convert memory permission. */ + auto svc_perm = ConvertToSvcMemoryPermission(perm); + + /* Create the memory. 
*/ + svc::Handle handle; + R_TRY_CATCH(svc::CreateTransferMemory(std::addressof(handle), reinterpret_cast(address), size, svc_perm)) { + R_CONVERT(svc::ResultOutOfHandles, os::ResultOutOfHandles()) + R_CONVERT(svc::ResultOutOfResource, os::ResultOutOfTransferMemory()) + } R_END_TRY_CATCH_WITH_ABORT_UNLESS; + + *out = handle; + return ResultSuccess(); + } + + void TransferMemoryImpl::Close(Handle handle) { + R_ABORT_UNLESS(svc::CloseHandle(handle)); + } + + Result TransferMemoryImpl::Map(void **out, Handle handle, void *address, size_t size, MemoryPermission owner_perm) { + AMS_ASSERT(address != nullptr); + + /* Convert memory permission. */ + auto svc_owner_perm = ConvertToSvcMemoryPermission(owner_perm); + + /* Map the memory. */ + R_TRY_CATCH(svc::MapTransferMemory(handle, reinterpret_cast(address), size, svc_owner_perm)) { + R_CONVERT(svc::ResultInvalidHandle, os::ResultInvalidHandle()) + R_CONVERT(svc::ResultInvalidSize, os::ResultInvalidTransferMemorySize()) + R_CONVERT(svc::ResultInvalidState, os::ResultInvalidTransferMemoryState()) + R_CONVERT(svc::ResultInvalidCurrentMemory, os::ResultInvalidCurrentMemoryState()) + R_CONVERT(svc::ResultInvalidMemoryRegion, os::ResultInvalidCurrentMemoryState()) + } R_END_TRY_CATCH_WITH_ABORT_UNLESS; + + *out = address; + return ResultSuccess(); + } + + void TransferMemoryImpl::Unmap(Handle handle, void *address, size_t size) { + R_ABORT_UNLESS(svc::UnmapTransferMemory(handle, reinterpret_cast(address), size)); + } + +} diff --git a/libraries/libstratosphere/source/os/impl/os_waitable_manager_target_impl.os.horizon.hpp b/libraries/libstratosphere/source/os/impl/os_waitable_manager_target_impl.os.horizon.hpp index 03918c95f..46c13d3b7 100644 --- a/libraries/libstratosphere/source/os/impl/os_waitable_manager_target_impl.os.horizon.hpp +++ b/libraries/libstratosphere/source/os/impl/os_waitable_manager_target_impl.os.horizon.hpp @@ -21,7 +21,7 @@ namespace ams::os::impl { class WaitableManagerHorizonImpl { public: - static constexpr size_t MaximumHandleCount = svc::MaxWaitSynchronizationHandleCount; + static constexpr size_t MaximumHandleCount = static_cast(ams::svc::ArgumentHandleCountMax); private: Handle handle; private: diff --git a/libraries/libstratosphere/source/os/os_sdk_thread_local_storage_api.cpp b/libraries/libstratosphere/source/os/os_sdk_thread_local_storage_api.cpp new file mode 100644 index 000000000..f7bb5d291 --- /dev/null +++ b/libraries/libstratosphere/source/os/os_sdk_thread_local_storage_api.cpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::os { + + /* TODO: Nintendo reserves half the TLS slots for SDK usage. */ + /* We don't have that ability...how should this work? 
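TransferMemoryImpl::Create and ::Map above translate raw svc results into os-namespace results at the module boundary and abort on anything unexpected, so callers only ever see the errors the os API documents. The same idea without the R_TRY_CATCH machinery, as a sketch with invented error enums:

#include <cstdint>
#include <cstdlib>

enum class SvcError : uint32_t { None, OutOfHandles, OutOfResource, Other };
enum class OsError  : uint32_t { None, OutOfHandles, OutOfTransferMemory };

/* Translate the low-level errors the API knows how to surface; anything
   else is treated as a programming error instead of leaking upward. */
OsError ConvertCreateResult(SvcError svc) {
    switch (svc) {
        case SvcError::None:          return OsError::None;
        case SvcError::OutOfHandles:  return OsError::OutOfHandles;
        case SvcError::OutOfResource: return OsError::OutOfTransferMemory;
        default:                      std::abort();
    }
}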
*/ + Result SdkAllocateTlsSlot(TlsSlot *out, TlsDestructor destructor) { + return os::AllocateTlsSlot(out, destructor); + } + +} diff --git a/libraries/libstratosphere/source/os/os_transfer_memory_api.cpp b/libraries/libstratosphere/source/os/os_transfer_memory_api.cpp new file mode 100644 index 000000000..747e1492e --- /dev/null +++ b/libraries/libstratosphere/source/os/os_transfer_memory_api.cpp @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "impl/os_thread_manager.hpp" +#include "impl/os_transfer_memory_impl.hpp" + +namespace ams::os { + + namespace { + + Result MapTransferMemoryWithAddressUnsafe(TransferMemoryType *tmem, void *address, os::MemoryPermission owner_perm) { + /* Map the transfer memory. */ + void *mapped_address = nullptr; + R_TRY(impl::TransferMemoryImpl::Map(std::addressof(mapped_address), tmem->handle, address, tmem->size, owner_perm)); + + /* Set fields now that we've mapped. */ + tmem->address = mapped_address; + tmem->state = TransferMemoryType::State_Mapped; + + return ResultSuccess(); + } + + inline void SetupTransferMemoryType(TransferMemoryType *tmem, size_t size, Handle handle, bool managed) { + /* Set members. */ + tmem->handle = handle; + tmem->size = size; + tmem->address = nullptr; + tmem->allocated = false; + + /* Set managed. */ + tmem->handle_managed = managed; + + /* Create the critical section. */ + new (GetPointer(tmem->cs_transfer_memory)) impl::InternalCriticalSection; + } + + } + + Result CreateTransferMemory(TransferMemoryType *tmem, void *address, size_t size, MemoryPermission perm) { + /* Validate pre-conditions. */ + AMS_ASSERT(size > 0); + AMS_ASSERT(util::IsAligned(size, os::MemoryPageSize)); + AMS_ASSERT(address != nullptr); + AMS_ASSERT(util::IsAligned(reinterpret_cast(address), os::MemoryPageSize)); + + /* Create the memory. */ + Handle handle; + R_TRY(impl::TransferMemoryImpl::Create(std::addressof(handle), address, size, perm)); + + /* Setup the object. */ + SetupTransferMemoryType(tmem, size, handle, true); + + return ResultSuccess(); + } + + Result AttachTransferMemory(TransferMemoryType *tmem, size_t size, Handle handle, bool managed) { + AMS_ASSERT(size > 0); + AMS_ASSERT(util::IsAligned(size, os::MemoryPageSize)); + AMS_ASSERT(handle != svc::InvalidHandle); + + /* Setup the object. */ + SetupTransferMemoryType(tmem, size, handle, managed); + + return ResultSuccess(); + } + + Handle DetachTransferMemory(TransferMemoryType *tmem) { + AMS_ASSERT(tmem->state == TransferMemoryType::State_Created); + + /* Set state to detached. */ + tmem->state = TransferMemoryType::State_Detached; + + /* Clear handle. */ + Handle handle = tmem->handle; + + tmem->handle = svc::InvalidHandle; + tmem->handle_managed = false; + + return handle; + } + + void DestroyTransferMemory(TransferMemoryType *tmem) { + /* Unmap the transfer memory, if required. 
*/ + if (tmem->state == TransferMemoryType::State_Mapped) { + UnmapTransferMemory(tmem); + } + + /* Check the state is valid. */ + AMS_ASSERT(tmem->state == TransferMemoryType::State_Created || tmem->state == TransferMemoryType::State_Detached); + + /* Set state to not initialized. */ + tmem->state = TransferMemoryType::State_NotInitialized; + + /* Close the handle, if it's managed. */ + if (tmem->handle_managed) { + impl::TransferMemoryImpl::Close(tmem->handle); + } + tmem->handle_managed = false; + + /* Clear members. */ + tmem->address = nullptr; + tmem->size = 0; + tmem->handle = svc::InvalidHandle; + + /* Destroy the critical section. */ + GetReference(tmem->cs_transfer_memory).~InternalCriticalSection(); + } + + Result MapTransferMemory(void **out, TransferMemoryType *tmem, MemoryPermission owner_perm) { + /* Lock the current thread, and then the transfer memory. */ + std::scoped_lock thread_lk(GetReference(impl::GetCurrentThread()->cs_thread)); + std::scoped_lock lk(GetReference(tmem->cs_transfer_memory)); + + /* Ensure we're in a mappable state. */ + AMS_ASSERT(tmem->state == TransferMemoryType::State_Created); + + /* Try to map up to 64 times. */ + for (int i = 0; i < 64; ++i) { + /* Reserve space to map the memory. */ + /* TODO: os::AslrSpaceManager */ + void *map_address = ::virtmemReserve(tmem->size); + R_UNLESS(map_address != nullptr, os::ResultOutOfAddressSpace()); + + /* Mark allocated. */ + tmem->allocated = true; + auto alloc_guard = SCOPE_GUARD { tmem->allocated = false; }; + + /* Try to map. */ + R_TRY_CATCH(MapTransferMemoryWithAddressUnsafe(tmem, map_address, owner_perm)) { + /* If we failed to map at the address, retry. */ + R_CATCH(os::ResultInvalidCurrentMemoryState) { continue; } + } R_END_TRY_CATCH; + + /* TODO: Check guard space via aslr manager. */ + if (false /* !impl::GetAslrSpaceManager()->CheckGuardSpace(reinterpret_cast(tmem->address), tmem->size) */) { + impl::TransferMemoryImpl::Unmap(tmem->handle, tmem->address, tmem->size); + continue; + } + + /* We mapped successfully. */ + alloc_guard.Cancel(); + *out = tmem->address; + return ResultSuccess(); + } + + /* We failed to map. */ + return os::ResultOutOfAddressSpace(); + } + + void UnmapTransferMemory(TransferMemoryType *tmem) { + /* Lock the memory. */ + std::scoped_lock lk(GetReference(tmem->cs_transfer_memory)); + + /* If the memory isn't mapped, we can't unmap it. */ + if (tmem->state != TransferMemoryType::State_Mapped) { + return; + } + + /* Unmap the memory. */ + impl::TransferMemoryImpl::Unmap(tmem->handle, tmem->address, tmem->size); + + /* Unmapped memory is necessarily not allocated. */ + if (tmem->allocated) { + tmem->allocated = false; + } + + /* Clear the address. */ + tmem->address = nullptr; + tmem->state = TransferMemoryType::State_Created; + } + +} diff --git a/libraries/libstratosphere/source/patcher/patcher_api.cpp b/libraries/libstratosphere/source/patcher/patcher_api.cpp index c7ec0ab9b..d13fabbaa 100644 --- a/libraries/libstratosphere/source/patcher/patcher_api.cpp +++ b/libraries/libstratosphere/source/patcher/patcher_api.cpp @@ -237,10 +237,7 @@ namespace ams::patcher { } /* Print the path for this directory. */ -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wformat-truncation" std::snprintf(path + patches_dir_path_len, sizeof(path) - patches_dir_path_len, "/%s", entry.name); -#pragma GCC diagnostic pop const size_t patch_dir_path_len = patches_dir_path_len + 1 + std::strlen(entry.name); /* Open the patch directory. 
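The new os transfer-memory API above follows a create/attach, map, unmap, destroy lifecycle, with MapTransferMemory retrying at freshly reserved addresses when the kernel rejects a candidate region. A hedged usage sketch for the receiving side of a shared buffer, based only on the signatures visible in this diff (error-path cleanup elided; the owner permission passed to Map must match whatever the creator specified):

/* Assumes <stratosphere.hpp> and the os transfer memory API added above. */
Result UseReceivedTransferMemory(Handle handle, size_t size) {
    /* Adopt the handle received over IPC; managed=true so Destroy closes it. */
    os::TransferMemoryType tmem;
    R_TRY(os::AttachTransferMemory(std::addressof(tmem), size, handle, true));

    /* Map it into our address space; the owner retains no access meanwhile. */
    void *address = nullptr;
    R_TRY(os::MapTransferMemory(std::addressof(address), std::addressof(tmem), os::MemoryPermission_None));

    /* ... read/write the shared region through address ... */

    os::UnmapTransferMemory(std::addressof(tmem));
    os::DestroyTransferMemory(std::addressof(tmem));
    return ResultSuccess();
}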
*/ diff --git a/libraries/libstratosphere/source/pgl/pgl_remote_event_observer.hpp b/libraries/libstratosphere/source/pgl/pgl_remote_event_observer.hpp index c7cf0d844..418ec6e99 100644 --- a/libraries/libstratosphere/source/pgl/pgl_remote_event_observer.hpp +++ b/libraries/libstratosphere/source/pgl/pgl_remote_event_observer.hpp @@ -18,28 +18,29 @@ namespace ams::pgl { - class RemoteEventObserver final : public pgl::sf::IEventObserver { + class RemoteEventObserver final { NON_COPYABLE(RemoteEventObserver); NON_MOVEABLE(RemoteEventObserver); private: ::PglEventObserver observer; public: constexpr RemoteEventObserver(const ::PglEventObserver &o) : observer(o) { /* ... */ } - virtual ~RemoteEventObserver() override { + ~RemoteEventObserver() { ::pglEventObserverClose(std::addressof(this->observer)); } - virtual Result GetProcessEventHandle(ams::sf::OutCopyHandle out) override { + Result GetProcessEventHandle(ams::sf::OutCopyHandle out) { ::Event ev; R_TRY(::pglEventObserverGetProcessEvent(std::addressof(this->observer), std::addressof(ev))); out.SetValue(ev.revent); return ResultSuccess(); } - virtual Result GetProcessEventInfo(ams::sf::Out out) override { + Result GetProcessEventInfo(ams::sf::Out out) { static_assert(sizeof(*out.GetPointer()) == sizeof(::PmProcessEventInfo)); return ::pglEventObserverGetProcessEventInfo(std::addressof(this->observer), reinterpret_cast<::PmProcessEventInfo *>(out.GetPointer())); } }; + static_assert(pgl::sf::IsIEventObserver); } \ No newline at end of file diff --git a/libraries/libstratosphere/source/pgl/pgl_shell_api.cpp b/libraries/libstratosphere/source/pgl/pgl_shell_api.cpp index 66c2bc2d0..742292091 100644 --- a/libraries/libstratosphere/source/pgl/pgl_shell_api.cpp +++ b/libraries/libstratosphere/source/pgl/pgl_shell_api.cpp @@ -79,7 +79,7 @@ namespace ams::pgl { ::PglEventObserver obs; R_TRY(::pglGetEventObserver(std::addressof(obs))); - auto remote_observer = std::make_shared(obs); + auto remote_observer = ams::sf::MakeShared(obs); AMS_ABORT_UNLESS(remote_observer != nullptr); *out = pgl::EventObserver(remote_observer); diff --git a/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.cpp b/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.cpp index daaa1e0f5..5b7bacec9 100644 --- a/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.cpp +++ b/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.cpp @@ -66,13 +66,13 @@ namespace ams::pgl::srv { this->event.Signal(); } - Result EventObserverInterface::GetProcessEventHandle(ams::sf::OutCopyHandle out) { - out.SetValue(GetReference(this->observer).GetEvent().GetReadableHandle()); + Result ShellEventObserver::GetProcessEventHandle(ams::sf::OutCopyHandle out) { + out.SetValue(this->GetEvent().GetReadableHandle()); return ResultSuccess(); } - Result EventObserverInterface::GetProcessEventInfo(ams::sf::Out out) { - return GetReference(this->observer).PopEventInfo(out.GetPointer()); + Result ShellEventObserver::GetProcessEventInfo(ams::sf::Out out) { + return this->PopEventInfo(out.GetPointer()); } -} \ No newline at end of file +} diff --git a/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.hpp b/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.hpp index 29918a8c3..30387526b 100644 --- a/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.hpp +++ b/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_event_observer.hpp @@ -56,23 +56,10 @@ namespace ams::pgl::srv { Result 
PopEventInfo(pm::ProcessEventInfo *out); virtual void Notify(const pm::ProcessEventInfo &info) override final; - }; - class EventObserverInterface final : public pgl::sf::IEventObserver { - private: - TYPED_STORAGE(ShellEventObserver) observer; - public: - EventObserverInterface() { - std::memset(std::addressof(this->observer), 0, sizeof(this->observer)); - new (GetPointer(this->observer)) ShellEventObserver; - } - - ~EventObserverInterface() { - GetReference(this->observer).~ShellEventObserver(); - } - public: - virtual Result GetProcessEventHandle(ams::sf::OutCopyHandle out) override final; - virtual Result GetProcessEventInfo(ams::sf::Out out) override final; + Result GetProcessEventHandle(ams::sf::OutCopyHandle out); + Result GetProcessEventInfo(ams::sf::Out out); }; + static_assert(pgl::sf::IsIEventObserver); } diff --git a/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_interface.cpp b/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_interface.cpp index 614488e25..81b2641d5 100644 --- a/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_interface.cpp +++ b/libraries/libstratosphere/source/pgl/srv/pgl_srv_shell_interface.cpp @@ -69,20 +69,22 @@ namespace ams::pgl::srv { } Result ShellInterface::GetShellEventObserver(ams::sf::Out> out) { + using Interface = typename pgl::sf::IEventObserver::ImplHolder; + /* Allocate a new interface. */ - auto *observer_memory = this->memory_resource->Allocate(sizeof(EventObserverInterface), alignof(EventObserverInterface)); + auto *observer_memory = this->memory_resource->Allocate(sizeof(Interface), alignof(Interface)); AMS_ABORT_UNLESS(observer_memory != nullptr); /* Create the interface object. */ - new (observer_memory) EventObserverInterface; + new (observer_memory) Interface; /* Set the output. */ - out.SetValue(std::shared_ptr(reinterpret_cast(observer_memory), [&](EventObserverInterface *obj) { + out.SetValue(std::shared_ptr(reinterpret_cast(observer_memory), [&](Interface *obj) { /* Destroy the object. */ - obj->~EventObserverInterface(); + obj->~Interface(); /* Custom deleter: use the memory resource to free. 
*/ - this->memory_resource->Deallocate(obj, sizeof(EventObserverInterface), alignof(EventObserverInterface)); + this->memory_resource->Deallocate(obj, sizeof(Interface), alignof(Interface)); })); return ResultSuccess(); } diff --git a/libraries/libstratosphere/source/psc/psc_pm_module.os.horizon.cpp b/libraries/libstratosphere/source/psc/psc_pm_module.os.horizon.cpp index 146e5e47a..f9c10cc06 100644 --- a/libraries/libstratosphere/source/psc/psc_pm_module.os.horizon.cpp +++ b/libraries/libstratosphere/source/psc/psc_pm_module.os.horizon.cpp @@ -34,7 +34,7 @@ namespace ams::psc { ::PscPmModule module; R_TRY(::pscmGetPmModule(std::addressof(module), static_cast<::PscPmModuleId>(mid), reinterpret_cast(dependencies), dependency_count, clear_mode == os::EventClearMode_AutoClear)); - this->intf = std::make_shared(module); + this->intf = ams::sf::MakeShared(module); this->system_event.AttachReadableHandle(module.event.revent, false, clear_mode); this->initialized = true; return ResultSuccess(); diff --git a/libraries/libstratosphere/source/psc/psc_remote_pm_module.hpp b/libraries/libstratosphere/source/psc/psc_remote_pm_module.hpp index ff6451a62..6afc613fc 100644 --- a/libraries/libstratosphere/source/psc/psc_remote_pm_module.hpp +++ b/libraries/libstratosphere/source/psc/psc_remote_pm_module.hpp @@ -18,41 +18,42 @@ namespace ams::psc { - class RemotePmModule final : public psc::sf::IPmModule { + class RemotePmModule final { NON_COPYABLE(RemotePmModule); NON_MOVEABLE(RemotePmModule); private: ::PscPmModule module; public: constexpr RemotePmModule(const ::PscPmModule &m) : module(m) { /* ... */ } - virtual ~RemotePmModule() override { + ~RemotePmModule() { ::pscPmModuleClose(std::addressof(this->module)); } - virtual Result Initialize(ams::sf::OutCopyHandle out, psc::PmModuleId module_id, const ams::sf::InBuffer &child_list) override final { + Result Initialize(ams::sf::OutCopyHandle out, psc::PmModuleId module_id, const ams::sf::InBuffer &child_list) { /* NOTE: This functionality is already implemented by the libnx command we use to instantiate the PscPmModule. */ AMS_ABORT(); } - virtual Result GetRequest(ams::sf::Out out_state, ams::sf::Out out_flags) override final { + Result GetRequest(ams::sf::Out out_state, ams::sf::Out out_flags) { static_assert(sizeof(PmState) == sizeof(::PscPmState)); static_assert(sizeof(PmFlagSet) == sizeof(u32)); return ::pscPmModuleGetRequest(std::addressof(this->module), reinterpret_cast<::PscPmState *>(out_state.GetPointer()), reinterpret_cast(out_flags.GetPointer())); } - virtual Result Acknowledge() override final { + Result Acknowledge() { /* NOTE: libnx does not separate acknowledge/acknowledgeEx. 
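The GetShellEventObserver hunk above allocates the generated interface object from the service's MemoryResource, constructs it with placement new, and returns a shared_ptr whose custom deleter destroys the object and hands the storage back to the same resource. The standard-library analogue of that pattern, as a sketch:

#include <memory>
#include <memory_resource>
#include <new>
#include <utility>

/* Construct T in storage taken from a memory_resource; the shared_ptr's
   deleter runs the destructor and returns the storage to that resource. */
template<typename T, typename... Args>
std::shared_ptr<T> AllocateShared(std::pmr::memory_resource *mr, Args &&... args) {
    void *storage = mr->allocate(sizeof(T), alignof(T));
    T *obj = new (storage) T(std::forward<Args>(args)...);
    return std::shared_ptr<T>(obj, [mr](T *p) {
        p->~T();
        mr->deallocate(p, sizeof(T), alignof(T));
    });
}

In purely standard code, std::allocate_shared with a std::pmr::polymorphic_allocator achieves the same result with the control block co-allocated; the explicit-deleter form above mirrors what the diff does with its own MemoryResource type.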
*/ return ::pscPmModuleAcknowledge(std::addressof(this->module), static_cast<::PscPmState>(0)); } - virtual Result Finalize() override final { + Result Finalize() { return ::pscPmModuleFinalize(std::addressof(this->module)); } - virtual Result AcknowledgeEx(PmState state) override final { + Result AcknowledgeEx(PmState state) { static_assert(sizeof(state) == sizeof(::PscPmState)); return ::pscPmModuleAcknowledge(std::addressof(this->module), static_cast<::PscPmState>(state)); } }; + static_assert(psc::sf::IsIPmModule); } \ No newline at end of file diff --git a/libraries/libstratosphere/source/settings/impl/settings_platform_region_impl.cpp b/libraries/libstratosphere/source/settings/impl/settings_platform_region_impl.cpp new file mode 100644 index 000000000..ce0b62ebe --- /dev/null +++ b/libraries/libstratosphere/source/settings/impl/settings_platform_region_impl.cpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "settings_platform_region_impl.hpp" + +namespace ams::settings::impl { + + Result GetPlatformRegion(s32 *out) { + static_assert(sizeof(*out) == sizeof(::SetSysPlatformRegion)); + return ::setsysGetPlatformRegion(reinterpret_cast<::SetSysPlatformRegion *>(out)); + } + +} diff --git a/libraries/libstratosphere/source/settings/impl/settings_platform_region_impl.hpp b/libraries/libstratosphere/source/settings/impl/settings_platform_region_impl.hpp new file mode 100644 index 000000000..d5b6833ec --- /dev/null +++ b/libraries/libstratosphere/source/settings/impl/settings_platform_region_impl.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::settings::impl { + + Result GetPlatformRegion(s32 *out); + +} diff --git a/libraries/libstratosphere/source/settings/settings_platform_region.cpp b/libraries/libstratosphere/source/settings/settings_platform_region.cpp new file mode 100644 index 000000000..b4c26b8c8 --- /dev/null +++ b/libraries/libstratosphere/source/settings/settings_platform_region.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "impl/settings_platform_region_impl.hpp" + +namespace ams::settings::system { + + PlatformRegion GetPlatformRegion() { + if (hos::GetVersion() >= hos::Version_9_0_0) { + s32 region = 0; + R_ABORT_UNLESS(settings::impl::GetPlatformRegion(std::addressof(region))); + return static_cast(region); + } else { + return PlatformRegion_Global; + } + } + +} diff --git a/libraries/libstratosphere/source/sf/cmif/sf_cmif_inline_context.cpp b/libraries/libstratosphere/source/sf/cmif/sf_cmif_inline_context.cpp index ecb3b98f3..a288754e2 100644 --- a/libraries/libstratosphere/source/sf/cmif/sf_cmif_inline_context.cpp +++ b/libraries/libstratosphere/source/sf/cmif/sf_cmif_inline_context.cpp @@ -21,47 +21,71 @@ namespace ams::sf { namespace { - thread_local InlineContext g_inline_context; + ALWAYS_INLINE std::atomic *GetAtomicSfInlineContext(os::ThreadType *thread) { + static_assert(sizeof(thread->atomic_sf_inline_context) >= sizeof(std::atomic)); + return reinterpret_cast *>(std::addressof(thread->atomic_sf_inline_context)); + } - ALWAYS_INLINE void OnSetInlineContext() { + ALWAYS_INLINE std::atomic *GetAtomicSfInlineContext() { + return GetAtomicSfInlineContext(os::GetCurrentThread()); + } + + ALWAYS_INLINE void OnSetInlineContext(os::ThreadType *thread) { /* Ensure that libnx receives the priority value. */ - ::fsSetPriority(static_cast<::FsPriority>(::ams::sf::GetFsInlineContext())); + ::fsSetPriority(static_cast<::FsPriority>(::ams::sf::GetFsInlineContext(thread))); } } InlineContext GetInlineContext() { + /* Get the context. */ + uintptr_t thread_context = GetAtomicSfInlineContext()->load(); + + /* Copy it out. */ InlineContext ctx; - std::memcpy(std::addressof(ctx), std::addressof(::ams::sf::cmif::g_inline_context), sizeof(ctx)); + static_assert(sizeof(ctx) <= sizeof(thread_context)); + std::memcpy(std::addressof(ctx), std::addressof(thread_context), sizeof(ctx)); return ctx; } InlineContext SetInlineContext(InlineContext ctx) { - ON_SCOPE_EXIT { OnSetInlineContext(); }; - static_assert(sizeof(ctx) <= sizeof(g_inline_context)); + /* Get current thread. */ + os::ThreadType * const cur_thread = os::GetCurrentThread(); + ON_SCOPE_EXIT { OnSetInlineContext(cur_thread); }; + /* Create the new context. */ + static_assert(sizeof(ctx) <= sizeof(uintptr_t)); + uintptr_t new_context_value = 0; + std::memcpy(std::addressof(new_context_value), std::addressof(ctx), sizeof(ctx)); + + /* Get the old context. */ + uintptr_t old_context_value = GetAtomicSfInlineContext(cur_thread)->exchange(new_context_value); + + /* Convert and copy it out. 
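The sf_cmif_inline_context rework above stops using a thread_local and instead packs the inline context into an atomic word stored directly in ThreadType (the atomic_sf_inline_context field initialized earlier in this diff), so GetFsInlineContext and SetFsInlineContext can now operate on an arbitrary thread, not just the current one. A minimal standard-C++ sketch of packing a small POD into an atomic word and exchanging it, with a global standing in for the per-thread field:

#include <atomic>
#include <cstdint>
#include <cstring>

struct InlineCtx { uint8_t fs_priority; uint8_t reserved[3]; };
static_assert(sizeof(InlineCtx) <= sizeof(uintptr_t));

std::atomic<uintptr_t> g_ctx_word{0};   /* stand-in for the per-thread field */

InlineCtx SetCtx(InlineCtx ctx) {
    /* Pack the new context into a word and atomically swap it in. */
    uintptr_t next = 0;
    std::memcpy(&next, &ctx, sizeof(ctx));
    const uintptr_t prev = g_ctx_word.exchange(next);

    /* Unpack and return the previous context. */
    InlineCtx old = {};
    std::memcpy(&old, &prev, sizeof(old));
    return old;
}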
*/ InlineContext old_ctx; - std::memcpy(std::addressof(old_ctx), std::addressof(g_inline_context), sizeof(old_ctx)); - std::memcpy(std::addressof(g_inline_context), std::addressof(ctx), sizeof(ctx)); + std::memcpy(std::addressof(old_ctx), std::addressof(old_context_value), sizeof(old_ctx)); return old_ctx; } } - u8 GetFsInlineContext() { - u8 ctx; - std::memcpy(std::addressof(ctx), std::addressof(cmif::g_inline_context), sizeof(ctx)); - return ctx; + namespace { + + ALWAYS_INLINE std::atomic *GetAtomicFsInlineContext(os::ThreadType *thread) { + static_assert(sizeof(thread->atomic_sf_inline_context) >= sizeof(std::atomic)); + return reinterpret_cast *>(std::addressof(thread->atomic_sf_inline_context)); + } + } - u8 SetFsInlineContext(u8 ctx) { - ON_SCOPE_EXIT { cmif::OnSetInlineContext(); }; - static_assert(sizeof(ctx) <= sizeof(cmif::g_inline_context)); + u8 GetFsInlineContext(os::ThreadType *thread) { + return GetAtomicFsInlineContext(thread)->load(); + } - u8 old_ctx; - std::memcpy(std::addressof(old_ctx), std::addressof(cmif::g_inline_context), sizeof(old_ctx)); - std::memcpy(std::addressof(cmif::g_inline_context), std::addressof(ctx), sizeof(ctx)); - return old_ctx; + u8 SetFsInlineContext(os::ThreadType *thread, u8 ctx) { + ON_SCOPE_EXIT { cmif::OnSetInlineContext(thread); }; + + return GetAtomicFsInlineContext(thread)->exchange(ctx); } } diff --git a/libraries/libstratosphere/source/sf/hipc/sf_hipc_mitm_query_api.cpp b/libraries/libstratosphere/source/sf/hipc/sf_hipc_mitm_query_api.cpp index 29b3e5c56..ee6f9c18a 100644 --- a/libraries/libstratosphere/source/sf/hipc/sf_hipc_mitm_query_api.cpp +++ b/libraries/libstratosphere/source/sf/hipc/sf_hipc_mitm_query_api.cpp @@ -20,11 +20,13 @@ namespace ams::sf::hipc::impl { namespace { - class MitmQueryService : public IServiceObject { - private: - enum class CommandId { - ShouldMitm = 65000, - }; + #define AMS_SF_HIPC_IMPL_I_MITM_QUERY_SERVICE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 65000, void, ShouldMitm, (sf::Out out, const sm::MitmProcessInfo &client_info)) + + AMS_SF_DEFINE_INTERFACE(IMitmQueryService, AMS_SF_HIPC_IMPL_I_MITM_QUERY_SERVICE_INTERFACE_INFO) + + + class MitmQueryService { private: ServerManagerBase::MitmQueryFunction query_function; public: @@ -33,11 +35,8 @@ namespace ams::sf::hipc::impl { void ShouldMitm(sf::Out out, const sm::MitmProcessInfo &client_info) { out.SetValue(this->query_function(client_info)); } - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(ShouldMitm), - }; }; + static_assert(IsIMitmQueryService); /* Globals. 
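The MitmQueryService change above is part of a broader move away from virtual service-object inheritance: the class now simply implements the command methods, and a generated trait (checked here with a static_assert) verifies at compile time that it satisfies the interface. A rough analogue of that conformance check using a C++20 concept is sketched below; the real AMS_SF_DEFINE_INTERFACE machinery generates far more (command metadata, proxy objects), so this only illustrates the checking idea and the names are assumptions.

#include <concepts>

struct MitmProcessInfo { unsigned long long program_id; };

/* Hand-written analogue of a generated "is an IMitmQueryService" trait:
   any type with a ShouldMitm(bool &, const MitmProcessInfo &) member qualifies. */
template<typename T>
concept IsMitmQueryServiceLike = requires (T &t, bool &out, const MitmProcessInfo &info) {
    { t.ShouldMitm(out, info) } -> std::same_as<void>;
};

/* Concrete implementation with no virtual base class. */
class MitmQueryService {
    private:
        bool (*query_function)(const MitmProcessInfo &);
    public:
        constexpr explicit MitmQueryService(bool (*f)(const MitmProcessInfo &)) : query_function(f) { /* ... */ }

        void ShouldMitm(bool &out, const MitmProcessInfo &info) {
            out = this->query_function(info);
        }
};

/* The conformance check happens at compile time, as with static_assert(IsIMitmQueryService<...>). */
static_assert(IsMitmQueryServiceLike<MitmQueryService>);

int main() { return 0; }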
*/ os::Mutex g_query_server_lock(false); @@ -66,7 +65,7 @@ namespace ams::sf::hipc::impl { g_constructed_server = true; } - R_ABORT_UNLESS(GetPointer(g_query_server_storage)->RegisterSession(query_handle, cmif::ServiceObjectHolder(std::make_shared(query_func)))); + R_ABORT_UNLESS(GetPointer(g_query_server_storage)->RegisterSession(query_handle, cmif::ServiceObjectHolder(sf::MakeShared(query_func)))); if (AMS_UNLIKELY(!g_registered_any)) { R_ABORT_UNLESS(os::CreateThread(std::addressof(g_query_server_process_thread), &QueryServerProcessThreadMain, GetPointer(g_query_server_storage), g_server_process_thread_stack, sizeof(g_server_process_thread_stack), AMS_GET_SYSTEM_THREAD_PRIORITY(mitm_sf, QueryServerProcessThread))); diff --git a/libraries/libstratosphere/source/sf/hipc/sf_hipc_server_domain_session_manager.cpp b/libraries/libstratosphere/source/sf/hipc/sf_hipc_server_domain_session_manager.cpp index 2177bd237..20578d12d 100644 --- a/libraries/libstratosphere/source/sf/hipc/sf_hipc_server_domain_session_manager.cpp +++ b/libraries/libstratosphere/source/sf/hipc/sf_hipc_server_domain_session_manager.cpp @@ -19,15 +19,16 @@ namespace ams::sf::hipc { namespace impl { - class HipcManager : public IServiceObject { - private: - enum class CommandId { - ConvertCurrentObjectToDomain = 0, - CopyFromCurrentDomain = 1, - CloneCurrentObject = 2, - QueryPointerBufferSize = 3, - CloneCurrentObjectEx = 4, - }; + #define AMS_SF_HIPC_IMPL_I_HIPC_MANAGER_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, ConvertCurrentObjectToDomain, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, CopyFromCurrentDomain, (ams::sf::OutMoveHandle out, ams::sf::cmif::DomainObjectId object_id)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, CloneCurrentObject, (ams::sf::OutMoveHandle out)) \ + AMS_SF_METHOD_INFO(C, H, 3, void, QueryPointerBufferSize, (ams::sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, CloneCurrentObjectEx, (ams::sf::OutMoveHandle out, u32 tag)) + + AMS_SF_DEFINE_INTERFACE(IHipcManager, AMS_SF_HIPC_IMPL_I_HIPC_MANAGER_INTERFACE_INFO) + + class HipcManager final { private: ServerDomainSessionManager *manager; ServerSession *session; @@ -150,16 +151,8 @@ namespace ams::sf::hipc { Result CloneCurrentObjectEx(sf::OutMoveHandle out, u32 tag) { return this->CloneCurrentObjectImpl(out.GetHandlePointer(), this->manager->GetSessionManagerByTag(tag)); } - - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(ConvertCurrentObjectToDomain), - MAKE_SERVICE_COMMAND_META(CopyFromCurrentDomain), - MAKE_SERVICE_COMMAND_META(CloneCurrentObject), - MAKE_SERVICE_COMMAND_META(QueryPointerBufferSize), - MAKE_SERVICE_COMMAND_META(CloneCurrentObjectEx), - }; }; + static_assert(IsIHipcManager); } @@ -168,7 +161,7 @@ namespace ams::sf::hipc { /* Note: This is safe, as no additional references to the hipc manager can ever be stored. */ /* The shared pointer to stack object is definitely gross, though. 
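DispatchRequest below wraps a stack-allocated HipcManager in a shared pointer that must never free it; the old code built an "empty delete" pointer through ServiceObjectTraits, and the new code goes through sf::GetSharedPointerTo. A minimal sketch of the underlying idiom, a non-owning std::shared_ptr built with the aliasing constructor, is shown here; the helper name MakeNonOwningShared is illustrative and not part of libstratosphere.

#include <cassert>
#include <memory>

/* Wrap a stack (or otherwise externally owned) object in a shared_ptr that
   never deletes it. Safe only while the object outlives every copy of the
   returned pointer, which is exactly the situation in DispatchRequest. */
template<typename T>
std::shared_ptr<T> MakeNonOwningShared(T &obj) {
    /* Aliasing constructor: share ownership with an empty control block,
       but point at obj. No deleter will ever run on obj. */
    return std::shared_ptr<T>(std::shared_ptr<void>(), std::addressof(obj));
}

struct HipcManagerLike {
    int dispatched = 0;
    void Dispatch() { ++this->dispatched; }
};

int main() {
    HipcManagerLike manager;                    /* stack lifetime */
    auto ptr = MakeNonOwningShared(manager);    /* non-owning view */
    ptr->Dispatch();
    assert(manager.dispatched == 1);
    return 0;                                   /* manager destroyed normally here */
}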
*/ impl::HipcManager hipc_manager(this, session); - return this->DispatchRequest(cmif::ServiceObjectHolder(std::move(ServiceObjectTraits::SharedPointerHelper::GetEmptyDeleteSharedPointer(&hipc_manager))), session, in_message, out_message); + return this->DispatchRequest(cmif::ServiceObjectHolder(sf::GetSharedPointerTo(hipc_manager)), session, in_message, out_message); } } diff --git a/libraries/libstratosphere/source/spl/smc/spl_smc.cpp b/libraries/libstratosphere/source/spl/smc/spl_smc.cpp index 232be11e8..02934c7a9 100644 --- a/libraries/libstratosphere/source/spl/smc/spl_smc.cpp +++ b/libraries/libstratosphere/source/spl/smc/spl_smc.cpp @@ -17,11 +17,11 @@ namespace ams::spl::smc { - Result SetConfig(SplConfigItem which, const u64 *value, size_t num_qwords) { + Result SetConfig(spl::ConfigItem which, const u64 *value, size_t num_qwords) { SecmonArgs args; args.X[0] = static_cast(FunctionId::SetConfig); - args.X[1] = which; + args.X[1] = static_cast(which); args.X[2] = 0; for (size_t i = 0; i < std::min(size_t(4), num_qwords); i++) { args.X[3 + i] = value[i]; @@ -31,11 +31,11 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result GetConfig(u64 *out, size_t num_qwords, SplConfigItem which) { + Result GetConfig(u64 *out, size_t num_qwords, spl::ConfigItem which) { SecmonArgs args; args.X[0] = static_cast(FunctionId::GetConfig); - args.X[1] = which; + args.X[1] = static_cast(which); svcCallSecureMonitor(&args); for (size_t i = 0; i < std::min(size_t(4), num_qwords); i++) { @@ -44,10 +44,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result CheckStatus(Result *out, AsyncOperationKey op) { + Result GetResult(Result *out, AsyncOperationKey op) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::CheckStatus); + args.X[0] = static_cast(FunctionId::GetResult); args.X[1] = op.value; svcCallSecureMonitor(&args); @@ -55,10 +55,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result GetResult(Result *out, void *out_buf, size_t out_buf_size, AsyncOperationKey op) { + Result GetResultData(Result *out, void *out_buf, size_t out_buf_size, AsyncOperationKey op) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::GetResult); + args.X[0] = static_cast(FunctionId::GetResultData); args.X[1] = op.value; args.X[2] = reinterpret_cast(out_buf); args.X[3] = out_buf_size; @@ -68,10 +68,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result ExpMod(AsyncOperationKey *out_op, const void *base, const void *exp, size_t exp_size, const void *mod) { + Result ModularExponentiate(AsyncOperationKey *out_op, const void *base, const void *exp, size_t exp_size, const void *mod) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::ExpMod); + args.X[0] = static_cast(FunctionId::ModularExponentiate); args.X[1] = reinterpret_cast(base); args.X[2] = reinterpret_cast(exp); args.X[3] = reinterpret_cast(mod); @@ -124,10 +124,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result CryptAes(AsyncOperationKey *out_op, u32 mode, const IvCtr &iv_ctr, u32 dst_addr, u32 src_addr, size_t size) { + Result ComputeAes(AsyncOperationKey *out_op, u32 mode, const IvCtr &iv_ctr, u32 dst_addr, u32 src_addr, size_t size) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::CryptAes); + args.X[0] = static_cast(FunctionId::ComputeAes); args.X[1] = mode; args.X[2] = iv_ctr.data64[0]; args.X[3] = iv_ctr.data64[1]; @@ -169,10 +169,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result ReEncryptRsaPrivateKey(void *data, size_t size, 
const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option) { + Result ReencryptDeviceUniqueData(void *data, size_t size, const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::ReEncryptRsaPrivateKey); + args.X[0] = static_cast(FunctionId::ReencryptDeviceUniqueData); args.X[1] = reinterpret_cast(&access_key_dec); args.X[2] = reinterpret_cast(&access_key_enc); args.X[3] = option; @@ -185,10 +185,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result DecryptOrImportRsaPrivateKey(void *data, size_t size, const AccessKey &access_key, const KeySource &source, DecryptOrImportMode mode) { + Result DecryptDeviceUniqueData(void *data, size_t size, const AccessKey &access_key, const KeySource &source, DeviceUniqueDataMode mode) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::DecryptOrImportRsaPrivateKey); + args.X[0] = static_cast(FunctionId::DecryptDeviceUniqueData); args.X[1] = access_key.data64[0]; args.X[2] = access_key.data64[1]; args.X[3] = static_cast(mode); @@ -201,10 +201,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result SecureExpMod(AsyncOperationKey *out_op, const void *base, const void *mod, SecureExpModMode mode) { + Result ModularExponentiateWithStorageKey(AsyncOperationKey *out_op, const void *base, const void *mod, ModularExponentiateWithStorageKeyMode mode) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::SecureExpMod); + args.X[0] = static_cast(FunctionId::ModularExponentiateWithStorageKey); args.X[1] = reinterpret_cast(base); args.X[2] = reinterpret_cast(mod); args.X[3] = static_cast(mode); @@ -214,10 +214,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result UnwrapTitleKey(AsyncOperationKey *out_op, const void *base, const void *mod, const void *label_digest, size_t label_digest_size, u32 option) { + Result PrepareEsDeviceUniqueKey(AsyncOperationKey *out_op, const void *base, const void *mod, const void *label_digest, size_t label_digest_size, u32 option) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::UnwrapTitleKey); + args.X[0] = static_cast(FunctionId::PrepareEsDeviceUniqueKey); args.X[1] = reinterpret_cast(base); args.X[2] = reinterpret_cast(mod); std::memset(&args.X[3], 0, 4 * sizeof(args.X[3])); @@ -229,10 +229,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result LoadTitleKey(u32 keyslot, const AccessKey &access_key) { + Result LoadPreparedAesKey(u32 keyslot, const AccessKey &access_key) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::LoadTitleKey); + args.X[0] = static_cast(FunctionId::LoadPreparedAesKey); args.X[1] = keyslot; args.X[2] = access_key.data64[0]; args.X[3] = access_key.data64[1]; @@ -241,10 +241,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result UnwrapCommonTitleKey(AccessKey *out, const KeySource &source, u32 generation) { + Result PrepareCommonEsTitleKey(AccessKey *out, const KeySource &source, u32 generation) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::UnwrapCommonTitleKey); + args.X[0] = static_cast(FunctionId::PrepareCommonEsTitleKey); args.X[1] = source.data64[0]; args.X[2] = source.data64[1]; args.X[3] = generation; @@ -257,10 +257,10 @@ namespace ams::spl::smc { /* Deprecated functions. 
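The hunks above rename most of the spl::smc wrappers to match the official symbol names (CheckStatus becomes GetResult, UnwrapTitleKey becomes PrepareEsDeviceUniqueKey, and so on). When a rename like this has external callers, one common migration technique is to keep the old name as a thin [[deprecated]] forwarding wrapper; the sketch below shows that technique in isolation under simplified types, and is not something this diff itself adds.

#include <cstdint>

using Result = std::uint32_t;
struct AccessKey { std::uint64_t data64[2]; };

/* New, officially named wrapper (stub body for illustration). */
Result LoadPreparedAesKey(std::uint32_t keyslot, const AccessKey &access_key) {
    (void)keyslot; (void)access_key;
    return 0;
}

/* Old name kept as a deprecated forwarder, so existing callers keep
   compiling but get a warning pointing them at the new symbol. */
[[deprecated("use LoadPreparedAesKey")]]
inline Result LoadTitleKey(std::uint32_t keyslot, const AccessKey &access_key) {
    return LoadPreparedAesKey(keyslot, access_key);
}

int main() {
    const AccessKey key = {};
    return static_cast<int>(LoadPreparedAesKey(0, key));
}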
*/ - Result ImportEsKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option) { + Result LoadEsDeviceKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::ImportEsKey); + args.X[0] = static_cast(FunctionId::LoadEsDeviceKey); args.X[1] = access_key.data64[0]; args.X[2] = access_key.data64[1]; args.X[3] = option; @@ -273,10 +273,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result DecryptRsaPrivateKey(size_t *out_size, void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option) { + Result DecryptDeviceUniqueData(size_t *out_size, void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::DecryptRsaPrivateKey); + args.X[0] = static_cast(FunctionId::DecryptDeviceUniqueData); args.X[1] = access_key.data64[0]; args.X[2] = access_key.data64[1]; args.X[3] = option; @@ -290,10 +290,10 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result ImportSecureExpModKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option) { + Result DecryptAndStoreGcKey(const void *data, size_t size, const AccessKey &access_key, const KeySource &source, u32 option) { SecmonArgs args; - args.X[0] = static_cast(FunctionId::ImportSecureExpModKey); + args.X[0] = static_cast(FunctionId::DecryptAndStoreGcKey); args.X[1] = access_key.data64[0]; args.X[2] = access_key.data64[1]; args.X[3] = option; @@ -348,19 +348,6 @@ namespace ams::spl::smc { return static_cast(args.X[0]); } - Result AtmosphereWriteAddress(void *dst, const void *src, size_t size) { - AMS_ABORT_UNLESS(size <= sizeof(u64)); - - SecmonArgs args; - args.X[0] = static_cast(FunctionId::AtmosphereWriteAddress); - args.X[1] = reinterpret_cast(dst); - __builtin_memcpy(&args.X[1], src, size); - args.X[3] = size; - svcCallSecureMonitor(&args); - - return static_cast(args.X[0]); - } - Result AtmosphereGetEmummcConfig(void *out_config, void *out_paths, u32 storage_id) { const u64 paths = reinterpret_cast(out_paths); AMS_ABORT_UNLESS(util::IsAligned(paths, os::MemoryPageSize)); diff --git a/libraries/libstratosphere/source/spl/spl_api.cpp b/libraries/libstratosphere/source/spl/spl_api.cpp index 6e8240244..f53b75379 100644 --- a/libraries/libstratosphere/source/spl/spl_api.cpp +++ b/libraries/libstratosphere/source/spl/spl_api.cpp @@ -63,7 +63,7 @@ namespace ams::spl { auto is_event_initialized = false; while (true) { R_TRY_CATCH(static_cast<::ams::Result>(f())) { - R_CATCH(spl::ResultOutOfKeyslots) { + R_CATCH(spl::ResultOutOfKeySlots) { if (!is_event_initialized) { GetAesKeySlotAvailableEvent(std::addressof(event)); is_event_initialized = true; diff --git a/libraries/libvapours/include/vapours/ams/ams_api_version.h b/libraries/libvapours/include/vapours/ams/ams_api_version.h index a7ec6545c..0a087ea81 100644 --- a/libraries/libvapours/include/vapours/ams/ams_api_version.h +++ b/libraries/libvapours/include/vapours/ams/ams_api_version.h @@ -16,11 +16,11 @@ #pragma once #define ATMOSPHERE_RELEASE_VERSION_MAJOR 0 -#define ATMOSPHERE_RELEASE_VERSION_MINOR 13 -#define ATMOSPHERE_RELEASE_VERSION_MICRO 0 +#define ATMOSPHERE_RELEASE_VERSION_MINOR 14 +#define ATMOSPHERE_RELEASE_VERSION_MICRO 1 #define ATMOSPHERE_RELEASE_VERSION ATMOSPHERE_RELEASE_VERSION_MAJOR, ATMOSPHERE_RELEASE_VERSION_MINOR, ATMOSPHERE_RELEASE_VERSION_MICRO #define 
ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 10 -#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 0 -#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO 4 +#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 1 +#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO 0 diff --git a/libraries/libvapours/include/vapours/ams/ams_target_firmware.h b/libraries/libvapours/include/vapours/ams/ams_target_firmware.h index 3273cf24c..0e9de3cb3 100644 --- a/libraries/libvapours/include/vapours/ams/ams_target_firmware.h +++ b/libraries/libvapours/include/vapours/ams/ams_target_firmware.h @@ -53,8 +53,10 @@ #define ATMOSPHERE_TARGET_FIRMWARE_10_0_2 ATMOSPHERE_TARGET_FIRMWARE(10, 0, 2) #define ATMOSPHERE_TARGET_FIRMWARE_10_0_3 ATMOSPHERE_TARGET_FIRMWARE(10, 0, 3) #define ATMOSPHERE_TARGET_FIRMWARE_10_0_4 ATMOSPHERE_TARGET_FIRMWARE(10, 0, 4) +#define ATMOSPHERE_TARGET_FIRMWARE_10_1_0 ATMOSPHERE_TARGET_FIRMWARE(10, 1, 0) +#define ATMOSPHERE_TARGET_FIRMWARE_10_1_1 ATMOSPHERE_TARGET_FIRMWARE(10, 1, 1) -#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_10_0_4 +#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_10_1_1 #define ATMOSPHERE_TARGET_FIRMWARE_MIN ATMOSPHERE_TARGET_FIRMWARE(0, 0, 0) #define ATMOSPHERE_TARGET_FIRMWARE_MAX ATMOSPHERE_TARGET_FIRMWARE_CURRENT @@ -99,6 +101,9 @@ namespace ams { TargetFirmware_10_0_1 = ATMOSPHERE_TARGET_FIRMWARE_10_0_1, TargetFirmware_10_0_2 = ATMOSPHERE_TARGET_FIRMWARE_10_0_2, TargetFirmware_10_0_3 = ATMOSPHERE_TARGET_FIRMWARE_10_0_3, + TargetFirmware_10_0_4 = ATMOSPHERE_TARGET_FIRMWARE_10_0_4, + TargetFirmware_10_1_0 = ATMOSPHERE_TARGET_FIRMWARE_10_1_0, + TargetFirmware_10_1_1 = ATMOSPHERE_TARGET_FIRMWARE_10_1_1, TargetFirmware_Current = ATMOSPHERE_TARGET_FIRMWARE_CURRENT, diff --git a/libraries/libvapours/include/vapours/defines.hpp b/libraries/libvapours/include/vapours/defines.hpp index 61c733237..a4be456b7 100644 --- a/libraries/libvapours/include/vapours/defines.hpp +++ b/libraries/libvapours/include/vapours/defines.hpp @@ -40,6 +40,9 @@ #define BITSIZEOF(x) (sizeof(x) * CHAR_BIT) +#define STRINGIZE(x) STRINGIZE_IMPL(x) +#define STRINGIZE_IMPL(x) #x + #ifdef __COUNTER__ #define ANONYMOUS_VARIABLE(pref) CONCATENATE(pref, __COUNTER__) #else diff --git a/libraries/libvapours/include/vapours/includes.hpp b/libraries/libvapours/include/vapours/includes.hpp index 577e2c458..4a5494402 100644 --- a/libraries/libvapours/include/vapours/includes.hpp +++ b/libraries/libvapours/include/vapours/includes.hpp @@ -25,6 +25,7 @@ #include #include #include +#include /* C++ headers. 
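defines.hpp gains the classic two-level stringizing macro pair above. The indirection matters: #x stringizes its argument before macro expansion, so the outer STRINGIZE exists solely to force the argument to expand first. The small self-contained demonstration below shows the difference; the EXAMPLE_VERSION_MINOR macro is illustrative rather than one of the real version defines.

#include <cassert>
#include <cstdio>
#include <cstring>

#define STRINGIZE(x) STRINGIZE_IMPL(x)
#define STRINGIZE_IMPL(x) #x

#define EXAMPLE_VERSION_MINOR 14

int main() {
    /* Direct stringizing does not expand the macro argument... */
    assert(std::strcmp(STRINGIZE_IMPL(EXAMPLE_VERSION_MINOR), "EXAMPLE_VERSION_MINOR") == 0);
    /* ...while the two-level form yields the expanded value. */
    assert(std::strcmp(STRINGIZE(EXAMPLE_VERSION_MINOR), "14") == 0);
    std::puts(STRINGIZE(EXAMPLE_VERSION_MINOR));
    return 0;
}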
*/ #include diff --git a/libraries/libvapours/include/vapours/results.hpp b/libraries/libvapours/include/vapours/results.hpp index fbebb06b3..4d4c41a56 100644 --- a/libraries/libvapours/include/vapours/results.hpp +++ b/libraries/libvapours/include/vapours/results.hpp @@ -37,8 +37,10 @@ #include #include #include -#include #include +#include +#include +#include #include #include #include diff --git a/libraries/libvapours/include/vapours/results/fs_results.hpp b/libraries/libvapours/include/vapours/results/fs_results.hpp index 0eeb9ada7..5128fb631 100644 --- a/libraries/libvapours/include/vapours/results/fs_results.hpp +++ b/libraries/libvapours/include/vapours/results/fs_results.hpp @@ -21,384 +21,389 @@ namespace ams::fs { R_DEFINE_NAMESPACE_RESULT_MODULE(2); - R_DEFINE_ERROR_RESULT(PathNotFound, 1); - R_DEFINE_ERROR_RESULT(PathAlreadyExists, 2); + R_DEFINE_ERROR_RANGE(HandledByAllProcess, 0, 999); + R_DEFINE_ERROR_RESULT(PathNotFound, 1); + R_DEFINE_ERROR_RESULT(PathAlreadyExists, 2); - R_DEFINE_ERROR_RESULT(TargetLocked, 7); - R_DEFINE_ERROR_RESULT(DirectoryNotEmpty, 8); + R_DEFINE_ERROR_RESULT(TargetLocked, 7); + R_DEFINE_ERROR_RESULT(DirectoryNotEmpty, 8); - R_DEFINE_ERROR_RANGE (NotEnoughFreeSpace, 30, 45); - R_DEFINE_ERROR_RANGE(NotEnoughFreeSpaceBis, 34, 38); - R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisCalibration, 35); - R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisSafe, 36); - R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisUser, 37); - R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisSystem, 38); - R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceSdCard, 39); + R_DEFINE_ERROR_RANGE (NotEnoughFreeSpace, 30, 45); + R_DEFINE_ERROR_RANGE(NotEnoughFreeSpaceBis, 34, 38); + R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisCalibration, 35); + R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisSafe, 36); + R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisUser, 37); + R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceBisSystem, 38); + R_DEFINE_ERROR_RESULT(NotEnoughFreeSpaceSdCard, 39); - R_DEFINE_ERROR_RESULT(UnsupportedSdkVersion, 50); + R_DEFINE_ERROR_RESULT(UnsupportedSdkVersion, 50); - R_DEFINE_ERROR_RESULT(MountNameAlreadyExists, 60); + R_DEFINE_ERROR_RESULT(MountNameAlreadyExists, 60); - R_DEFINE_ERROR_RESULT(PartitionNotFound, 1001); - R_DEFINE_ERROR_RESULT(TargetNotFound, 1002); + R_DEFINE_ERROR_RANGE(HandledBySystemProcess, 1000, 2999); + R_DEFINE_ERROR_RESULT(PartitionNotFound, 1001); + R_DEFINE_ERROR_RESULT(TargetNotFound, 1002); - R_DEFINE_ERROR_RANGE(SdCardAccessFailed, 2000, 2499); - R_DEFINE_ERROR_RESULT(SdCardNotPresent, 2001); + R_DEFINE_ERROR_RANGE(SdCardAccessFailed, 2000, 2499); + R_DEFINE_ERROR_RESULT(SdCardNotPresent, 2001); - R_DEFINE_ERROR_RANGE(GameCardAccessFailed, 2500, 2999); + R_DEFINE_ERROR_RANGE(GameCardAccessFailed, 2500, 2999); - R_DEFINE_ERROR_RESULT(NotImplemented, 3001); - R_DEFINE_ERROR_RESULT(UnsupportedVersion, 3002); - R_DEFINE_ERROR_RESULT(OutOfRange, 3005); + R_DEFINE_ERROR_RESULT(NotImplemented, 3001); + R_DEFINE_ERROR_RESULT(UnsupportedVersion, 3002); + R_DEFINE_ERROR_RESULT(OutOfRange, 3005); - R_DEFINE_ERROR_RESULT(SystemPartitionNotReady, 3100); + R_DEFINE_ERROR_RESULT(SystemPartitionNotReady, 3100); - R_DEFINE_ERROR_RANGE(AllocationFailure, 3200, 3499); - R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemAccessorA, 3211); - R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemAccessorB, 3212); - R_DEFINE_ERROR_RESULT(AllocationFailureInApplicationA, 3213); - R_DEFINE_ERROR_RESULT(AllocationFailureInBisA, 3215); - R_DEFINE_ERROR_RESULT(AllocationFailureInBisB, 3216); - 
R_DEFINE_ERROR_RESULT(AllocationFailureInBisC, 3217); - R_DEFINE_ERROR_RESULT(AllocationFailureInCodeA, 3218); - R_DEFINE_ERROR_RESULT(AllocationFailureInContentA, 3219); - R_DEFINE_ERROR_RESULT(AllocationFailureInContentStorageA, 3220); - R_DEFINE_ERROR_RESULT(AllocationFailureInContentStorageB, 3221); - R_DEFINE_ERROR_RESULT(AllocationFailureInDataA, 3222); - R_DEFINE_ERROR_RESULT(AllocationFailureInDataB, 3223); - R_DEFINE_ERROR_RESULT(AllocationFailureInDeviceSaveDataA, 3224); - R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardA, 3225); - R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardB, 3226); - R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardC, 3227); - R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardD, 3228); - R_DEFINE_ERROR_RESULT(AllocationFailureInImageDirectoryA, 3232); - R_DEFINE_ERROR_RESULT(AllocationFailureInSdCardA, 3244); - R_DEFINE_ERROR_RESULT(AllocationFailureInSdCardB, 3245); - R_DEFINE_ERROR_RESULT(AllocationFailureInSystemSaveDataA, 3246); - R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemA, 3247); - R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemB, 3248); - R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemC, 3249); - R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemCreatorA, 3280); - R_DEFINE_ERROR_RESULT(AllocationFailureInRomFileSystemCreatorA, 3281); - R_DEFINE_ERROR_RESULT(AllocationFailureInStorageOnNcaCreatorA, 3288); - R_DEFINE_ERROR_RESULT(AllocationFailureInStorageOnNcaCreatorB, 3289); - R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemBuddyHeapA, 3294); - R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemBufferManagerA, 3295); - R_DEFINE_ERROR_RESULT(AllocationFailureInBlockCacheBufferedStorageA, 3296); - R_DEFINE_ERROR_RESULT(AllocationFailureInBlockCacheBufferedStorageB, 3297); - R_DEFINE_ERROR_RESULT(AllocationFailureInIntegrityVerificationStorageA, 3304); - R_DEFINE_ERROR_RESULT(AllocationFailureInIntegrityVerificationStorageB, 3305); - R_DEFINE_ERROR_RESULT(AllocationFailureInDirectorySaveDataFileSystem, 3321); - R_DEFINE_ERROR_RESULT(AllocationFailureInNcaFileSystemDriverI, 3341); - R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemA, 3347); - R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemB, 3348); - R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemC, 3349); - R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemMetaA, 3350); - R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemMetaB, 3351); - R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemD, 3352); - R_DEFINE_ERROR_RESULT(AllocationFailureInSubDirectoryFileSystem, 3355); - R_DEFINE_ERROR_RESULT(AllocationFailureInNcaReaderA, 3363); - R_DEFINE_ERROR_RESULT(AllocationFailureInRegisterA, 3365); - R_DEFINE_ERROR_RESULT(AllocationFailureInRegisterB, 3366); - R_DEFINE_ERROR_RESULT(AllocationFailureInPathNormalizer, 3367); - R_DEFINE_ERROR_RESULT(AllocationFailureInDbmRomKeyValueStorage, 3375); - R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemE, 3377); - R_DEFINE_ERROR_RESULT(AllocationFailureInReadOnlyFileSystemA, 3386); - R_DEFINE_ERROR_RESULT(AllocationFailureInAesCtrCounterExtendedStorageA, 3399); - R_DEFINE_ERROR_RESULT(AllocationFailureInAesCtrCounterExtendedStorageB, 3400); - R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemInterfaceAdapter, 3407); - R_DEFINE_ERROR_RESULT(AllocationFailureInBufferedStorageA, 3411); - R_DEFINE_ERROR_RESULT(AllocationFailureInIntegrityRomFsStorageA, 3412); - R_DEFINE_ERROR_RESULT(AllocationFailureInNew, 3420); - 
R_DEFINE_ERROR_RESULT(AllocationFailureInMakeUnique, 3422); - R_DEFINE_ERROR_RESULT(AllocationFailureInAllocateShared, 3423); - R_DEFINE_ERROR_RESULT(AllocationFailurePooledBufferNotEnoughSize, 3424); + R_DEFINE_ERROR_RANGE(AllocationFailure, 3200, 3499); + R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemAccessorA, 3211); + R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemAccessorB, 3212); + R_DEFINE_ERROR_RESULT(AllocationFailureInApplicationA, 3213); + R_DEFINE_ERROR_RESULT(AllocationFailureInBisA, 3215); + R_DEFINE_ERROR_RESULT(AllocationFailureInBisB, 3216); + R_DEFINE_ERROR_RESULT(AllocationFailureInBisC, 3217); + R_DEFINE_ERROR_RESULT(AllocationFailureInCodeA, 3218); + R_DEFINE_ERROR_RESULT(AllocationFailureInContentA, 3219); + R_DEFINE_ERROR_RESULT(AllocationFailureInContentStorageA, 3220); + R_DEFINE_ERROR_RESULT(AllocationFailureInContentStorageB, 3221); + R_DEFINE_ERROR_RESULT(AllocationFailureInDataA, 3222); + R_DEFINE_ERROR_RESULT(AllocationFailureInDataB, 3223); + R_DEFINE_ERROR_RESULT(AllocationFailureInDeviceSaveDataA, 3224); + R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardA, 3225); + R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardB, 3226); + R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardC, 3227); + R_DEFINE_ERROR_RESULT(AllocationFailureInGameCardD, 3228); + R_DEFINE_ERROR_RESULT(AllocationFailureInImageDirectoryA, 3232); + R_DEFINE_ERROR_RESULT(AllocationFailureInSdCardA, 3244); + R_DEFINE_ERROR_RESULT(AllocationFailureInSdCardB, 3245); + R_DEFINE_ERROR_RESULT(AllocationFailureInSystemSaveDataA, 3246); + R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemA, 3247); + R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemB, 3248); + R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemC, 3249); + R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemProxyCoreImplD, 3256); + R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemProxyCoreImplE, 3257); + R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemCreatorA, 3280); + R_DEFINE_ERROR_RESULT(AllocationFailureInRomFileSystemCreatorA, 3281); + R_DEFINE_ERROR_RESULT(AllocationFailureInStorageOnNcaCreatorA, 3288); + R_DEFINE_ERROR_RESULT(AllocationFailureInStorageOnNcaCreatorB, 3289); + R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemBuddyHeapA, 3294); + R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemBufferManagerA, 3295); + R_DEFINE_ERROR_RESULT(AllocationFailureInBlockCacheBufferedStorageA, 3296); + R_DEFINE_ERROR_RESULT(AllocationFailureInBlockCacheBufferedStorageB, 3297); + R_DEFINE_ERROR_RESULT(AllocationFailureInIntegrityVerificationStorageA, 3304); + R_DEFINE_ERROR_RESULT(AllocationFailureInIntegrityVerificationStorageB, 3305); + R_DEFINE_ERROR_RESULT(AllocationFailureInDirectorySaveDataFileSystem, 3321); + R_DEFINE_ERROR_RESULT(AllocationFailureInNcaFileSystemDriverI, 3341); + R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemA, 3347); + R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemB, 3348); + R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemC, 3349); + R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemMetaA, 3350); + R_DEFINE_ERROR_RESULT(AllocationFailureInPartitionFileSystemMetaB, 3351); + R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemD, 3352); + R_DEFINE_ERROR_RESULT(AllocationFailureInSubDirectoryFileSystem, 3355); + R_DEFINE_ERROR_RESULT(AllocationFailureInNcaReaderA, 3363); + R_DEFINE_ERROR_RESULT(AllocationFailureInRegisterA, 3365); + R_DEFINE_ERROR_RESULT(AllocationFailureInRegisterB, 3366); + 
R_DEFINE_ERROR_RESULT(AllocationFailureInPathNormalizer, 3367); + R_DEFINE_ERROR_RESULT(AllocationFailureInDbmRomKeyValueStorage, 3375); + R_DEFINE_ERROR_RESULT(AllocationFailureInRomFsFileSystemE, 3377); + R_DEFINE_ERROR_RESULT(AllocationFailureInReadOnlyFileSystemA, 3386); + R_DEFINE_ERROR_RESULT(AllocationFailureInAesCtrCounterExtendedStorageA, 3399); + R_DEFINE_ERROR_RESULT(AllocationFailureInAesCtrCounterExtendedStorageB, 3400); + R_DEFINE_ERROR_RESULT(AllocationFailureInFileSystemInterfaceAdapter, 3407); + R_DEFINE_ERROR_RESULT(AllocationFailureInBufferedStorageA, 3411); + R_DEFINE_ERROR_RESULT(AllocationFailureInIntegrityRomFsStorageA, 3412); + R_DEFINE_ERROR_RESULT(AllocationFailureInNew, 3420); + R_DEFINE_ERROR_RESULT(AllocationFailureInMakeUnique, 3422); + R_DEFINE_ERROR_RESULT(AllocationFailureInAllocateShared, 3423); + R_DEFINE_ERROR_RESULT(AllocationFailurePooledBufferNotEnoughSize, 3424); - R_DEFINE_ERROR_RANGE(MmcAccessFailed, 3500, 3999); + R_DEFINE_ERROR_RANGE(Internal, 3000, 7999); + R_DEFINE_ERROR_RANGE(MmcAccessFailed, 3500, 3999); - R_DEFINE_ERROR_RANGE(DataCorrupted, 4000, 4999); - R_DEFINE_ERROR_RANGE(RomCorrupted, 4001, 4299); - R_DEFINE_ERROR_RESULT(UnsupportedRomVersion, 4002); + R_DEFINE_ERROR_RANGE(DataCorrupted, 4000, 4999); + R_DEFINE_ERROR_RANGE(RomCorrupted, 4001, 4299); + R_DEFINE_ERROR_RESULT(UnsupportedRomVersion, 4002); - R_DEFINE_ERROR_RANGE(AesCtrCounterExtendedStorageCorrupted, 4011, 4019); - R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedEntryOffset, 4012); - R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedTableSize, 4013); - R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedGeneration, 4014); - R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedOffset, 4015); + R_DEFINE_ERROR_RANGE(AesCtrCounterExtendedStorageCorrupted, 4011, 4019); + R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedEntryOffset, 4012); + R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedTableSize, 4013); + R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedGeneration, 4014); + R_DEFINE_ERROR_RESULT(InvalidAesCtrCounterExtendedOffset, 4015); - R_DEFINE_ERROR_RANGE(IndirectStorageCorrupted, 4021, 4029); - R_DEFINE_ERROR_RESULT(InvalidIndirectEntryOffset, 4022); - R_DEFINE_ERROR_RESULT(InvalidIndirectEntryStorageIndex, 4023); - R_DEFINE_ERROR_RESULT(InvalidIndirectStorageSize, 4024); - R_DEFINE_ERROR_RESULT(InvalidIndirectVirtualOffset, 4025); - R_DEFINE_ERROR_RESULT(InvalidIndirectPhysicalOffset, 4026); - R_DEFINE_ERROR_RESULT(InvalidIndirectStorageIndex, 4027); + R_DEFINE_ERROR_RANGE(IndirectStorageCorrupted, 4021, 4029); + R_DEFINE_ERROR_RESULT(InvalidIndirectEntryOffset, 4022); + R_DEFINE_ERROR_RESULT(InvalidIndirectEntryStorageIndex, 4023); + R_DEFINE_ERROR_RESULT(InvalidIndirectStorageSize, 4024); + R_DEFINE_ERROR_RESULT(InvalidIndirectVirtualOffset, 4025); + R_DEFINE_ERROR_RESULT(InvalidIndirectPhysicalOffset, 4026); + R_DEFINE_ERROR_RESULT(InvalidIndirectStorageIndex, 4027); - R_DEFINE_ERROR_RANGE(BucketTreeCorrupted, 4031, 4039); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeSignature, 4032); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeEntryCount, 4033); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeNodeEntryCount, 4034); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeNodeOffset, 4035); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeEntryOffset, 4036); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeEntrySetOffset, 4037); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeNodeIndex, 4038); - R_DEFINE_ERROR_RESULT(InvalidBucketTreeVirtualOffset, 4039); + R_DEFINE_ERROR_RANGE(BucketTreeCorrupted, 4031, 4039); + 
R_DEFINE_ERROR_RESULT(InvalidBucketTreeSignature, 4032); + R_DEFINE_ERROR_RESULT(InvalidBucketTreeEntryCount, 4033); + R_DEFINE_ERROR_RESULT(InvalidBucketTreeNodeEntryCount, 4034); + R_DEFINE_ERROR_RESULT(InvalidBucketTreeNodeOffset, 4035); + R_DEFINE_ERROR_RESULT(InvalidBucketTreeEntryOffset, 4036); + R_DEFINE_ERROR_RESULT(InvalidBucketTreeEntrySetOffset, 4037); + R_DEFINE_ERROR_RESULT(InvalidBucketTreeNodeIndex, 4038); + R_DEFINE_ERROR_RESULT(InvalidBucketTreeVirtualOffset, 4039); - R_DEFINE_ERROR_RANGE(RomNcaCorrupted, 4041, 4139); - R_DEFINE_ERROR_RANGE(RomNcaFileSystemCorrupted, 4051, 4069); - R_DEFINE_ERROR_RESULT(InvalidRomNcaFileSystemType, 4052); - R_DEFINE_ERROR_RESULT(InvalidRomAcidFileSize, 4053); - R_DEFINE_ERROR_RESULT(InvalidRomAcidSize, 4054); - R_DEFINE_ERROR_RESULT(InvalidRomAcid, 4055); - R_DEFINE_ERROR_RESULT(RomAcidVerificationFailed, 4056); - R_DEFINE_ERROR_RESULT(InvalidRomNcaSignature, 4057); - R_DEFINE_ERROR_RESULT(RomNcaHeaderSignature1VerificationFailed, 4058); - R_DEFINE_ERROR_RESULT(RomNcaHeaderSignature2VerificationFailed, 4059); - R_DEFINE_ERROR_RESULT(RomNcaFsHeaderHashVerificationFailed, 4060); - R_DEFINE_ERROR_RESULT(InvalidRomNcaKeyIndex, 4061); - R_DEFINE_ERROR_RESULT(InvalidRomNcaFsHeaderHashType, 4062); - R_DEFINE_ERROR_RESULT(InvalidRomNcaFsHeaderEncryptionType, 4063); + R_DEFINE_ERROR_RANGE(RomNcaCorrupted, 4041, 4139); + R_DEFINE_ERROR_RANGE(RomNcaFileSystemCorrupted, 4051, 4069); + R_DEFINE_ERROR_RESULT(InvalidRomNcaFileSystemType, 4052); + R_DEFINE_ERROR_RESULT(InvalidRomAcidFileSize, 4053); + R_DEFINE_ERROR_RESULT(InvalidRomAcidSize, 4054); + R_DEFINE_ERROR_RESULT(InvalidRomAcid, 4055); + R_DEFINE_ERROR_RESULT(RomAcidVerificationFailed, 4056); + R_DEFINE_ERROR_RESULT(InvalidRomNcaSignature, 4057); + R_DEFINE_ERROR_RESULT(RomNcaHeaderSignature1VerificationFailed, 4058); + R_DEFINE_ERROR_RESULT(RomNcaHeaderSignature2VerificationFailed, 4059); + R_DEFINE_ERROR_RESULT(RomNcaFsHeaderHashVerificationFailed, 4060); + R_DEFINE_ERROR_RESULT(InvalidRomNcaKeyIndex, 4061); + R_DEFINE_ERROR_RESULT(InvalidRomNcaFsHeaderHashType, 4062); + R_DEFINE_ERROR_RESULT(InvalidRomNcaFsHeaderEncryptionType, 4063); - R_DEFINE_ERROR_RANGE(RomNcaHierarchicalSha256StorageCorrupted, 4071, 4079); - R_DEFINE_ERROR_RESULT(InvalidRomHierarchicalSha256BlockSize, 4072); - R_DEFINE_ERROR_RESULT(InvalidRomHierarchicalSha256LayerCount, 4073); - R_DEFINE_ERROR_RESULT(RomHierarchicalSha256BaseStorageTooLarge, 4074); - R_DEFINE_ERROR_RESULT(RomHierarchicalSha256HashVerificationFailed, 4075); + R_DEFINE_ERROR_RANGE(RomNcaHierarchicalSha256StorageCorrupted, 4071, 4079); + R_DEFINE_ERROR_RESULT(InvalidRomHierarchicalSha256BlockSize, 4072); + R_DEFINE_ERROR_RESULT(InvalidRomHierarchicalSha256LayerCount, 4073); + R_DEFINE_ERROR_RESULT(RomHierarchicalSha256BaseStorageTooLarge, 4074); + R_DEFINE_ERROR_RESULT(RomHierarchicalSha256HashVerificationFailed, 4075); - R_DEFINE_ERROR_RANGE(RomIntegrityVerificationStorageCorrupted, 4141, 4179); - R_DEFINE_ERROR_RESULT(IncorrectRomIntegrityVerificationMagic, 4142); - R_DEFINE_ERROR_RESULT(InvalidRomZeroHash, 4143); - R_DEFINE_ERROR_RESULT(RomNonRealDataVerificationFailed, 4144); - R_DEFINE_ERROR_RESULT(InvalidRomHierarchicalIntegrityVerificationLayerCount, 4145); + R_DEFINE_ERROR_RANGE(RomIntegrityVerificationStorageCorrupted, 4141, 4179); + R_DEFINE_ERROR_RESULT(IncorrectRomIntegrityVerificationMagic, 4142); + R_DEFINE_ERROR_RESULT(InvalidRomZeroHash, 4143); + R_DEFINE_ERROR_RESULT(RomNonRealDataVerificationFailed, 4144); + 
R_DEFINE_ERROR_RESULT(InvalidRomHierarchicalIntegrityVerificationLayerCount, 4145); - R_DEFINE_ERROR_RANGE(RomRealDataVerificationFailed, 4151, 4159); - R_DEFINE_ERROR_RESULT(ClearedRomRealDataVerificationFailed, 4152); - R_DEFINE_ERROR_RESULT(UnclearedRomRealDataVerificationFailed, 4153); + R_DEFINE_ERROR_RANGE(RomRealDataVerificationFailed, 4151, 4159); + R_DEFINE_ERROR_RESULT(ClearedRomRealDataVerificationFailed, 4152); + R_DEFINE_ERROR_RESULT(UnclearedRomRealDataVerificationFailed, 4153); - R_DEFINE_ERROR_RANGE(RomPartitionFileSystemCorrupted, 4181, 4199); - R_DEFINE_ERROR_RESULT(InvalidRomSha256PartitionHashTarget, 4182); - R_DEFINE_ERROR_RESULT(RomSha256PartitionHashVerificationFailed, 4183); - R_DEFINE_ERROR_RESULT(RomPartitionSignatureVerificationFailed, 4184); - R_DEFINE_ERROR_RESULT(RomSha256PartitionSignatureVerificationFailed, 4185); - R_DEFINE_ERROR_RESULT(InvalidRomPartitionEntryOffset, 4186); - R_DEFINE_ERROR_RESULT(InvalidRomSha256PartitionMetaDataSize, 4187); + R_DEFINE_ERROR_RANGE(RomPartitionFileSystemCorrupted, 4181, 4199); + R_DEFINE_ERROR_RESULT(InvalidRomSha256PartitionHashTarget, 4182); + R_DEFINE_ERROR_RESULT(RomSha256PartitionHashVerificationFailed, 4183); + R_DEFINE_ERROR_RESULT(RomPartitionSignatureVerificationFailed, 4184); + R_DEFINE_ERROR_RESULT(RomSha256PartitionSignatureVerificationFailed, 4185); + R_DEFINE_ERROR_RESULT(InvalidRomPartitionEntryOffset, 4186); + R_DEFINE_ERROR_RESULT(InvalidRomSha256PartitionMetaDataSize, 4187); - R_DEFINE_ERROR_RANGE(RomBuiltInStorageCorrupted, 4201, 4219); - R_DEFINE_ERROR_RESULT(RomGptHeaderVerificationFailed, 4202); + R_DEFINE_ERROR_RANGE(RomBuiltInStorageCorrupted, 4201, 4219); + R_DEFINE_ERROR_RESULT(RomGptHeaderVerificationFailed, 4202); - R_DEFINE_ERROR_RANGE(RomHostFileSystemCorrupted, 4241, 4259); - R_DEFINE_ERROR_RESULT(RomHostEntryCorrupted, 4242); - R_DEFINE_ERROR_RESULT(RomHostFileDataCorrupted, 4243); - R_DEFINE_ERROR_RESULT(RomHostFileCorrupted, 4244); - R_DEFINE_ERROR_RESULT(InvalidRomHostHandle, 4245); + R_DEFINE_ERROR_RANGE(RomHostFileSystemCorrupted, 4241, 4259); + R_DEFINE_ERROR_RESULT(RomHostEntryCorrupted, 4242); + R_DEFINE_ERROR_RESULT(RomHostFileDataCorrupted, 4243); + R_DEFINE_ERROR_RESULT(RomHostFileCorrupted, 4244); + R_DEFINE_ERROR_RESULT(InvalidRomHostHandle, 4245); - R_DEFINE_ERROR_RANGE(RomDatabaseCorrupted, 4261, 4279); - R_DEFINE_ERROR_RESULT(InvalidRomAllocationTableBlock, 4262); - R_DEFINE_ERROR_RESULT(InvalidRomKeyValueListElementIndex, 4263); + R_DEFINE_ERROR_RANGE(RomDatabaseCorrupted, 4261, 4279); + R_DEFINE_ERROR_RESULT(InvalidRomAllocationTableBlock, 4262); + R_DEFINE_ERROR_RESULT(InvalidRomKeyValueListElementIndex, 4263); - R_DEFINE_ERROR_RANGE(SaveDataCorrupted, 4301, 4499); - R_DEFINE_ERROR_RANGE(NcaCorrupted, 4501, 4599); - R_DEFINE_ERROR_RESULT(NcaBaseStorageOutOfRangeA, 4508); - R_DEFINE_ERROR_RESULT(NcaBaseStorageOutOfRangeB, 4509); + R_DEFINE_ERROR_RANGE(SaveDataCorrupted, 4301, 4499); + R_DEFINE_ERROR_RANGE(NcaCorrupted, 4501, 4599); + R_DEFINE_ERROR_RESULT(NcaBaseStorageOutOfRangeA, 4508); + R_DEFINE_ERROR_RESULT(NcaBaseStorageOutOfRangeB, 4509); - R_DEFINE_ERROR_RANGE(NcaFileSystemCorrupted, 4511, 4529); - R_DEFINE_ERROR_RESULT(InvalidNcaFileSystemType, 4512); - R_DEFINE_ERROR_RESULT(InvalidAcidFileSize, 4513); - R_DEFINE_ERROR_RESULT(InvalidAcidSize, 4514); - R_DEFINE_ERROR_RESULT(InvalidAcid, 4515); - R_DEFINE_ERROR_RESULT(AcidVerificationFailed, 4516); - R_DEFINE_ERROR_RESULT(InvalidNcaSignature, 4517); - R_DEFINE_ERROR_RESULT(NcaHeaderSignature1VerificationFailed, 
4518); - R_DEFINE_ERROR_RESULT(NcaHeaderSignature2VerificationFailed, 4519); - R_DEFINE_ERROR_RESULT(NcaFsHeaderHashVerificationFailed, 4520); - R_DEFINE_ERROR_RESULT(InvalidNcaKeyIndex, 4521); - R_DEFINE_ERROR_RESULT(InvalidNcaFsHeaderHashType, 4522); - R_DEFINE_ERROR_RESULT(InvalidNcaFsHeaderEncryptionType, 4523); - R_DEFINE_ERROR_RESULT(InvalidNcaPatchInfoIndirectSize, 4524); - R_DEFINE_ERROR_RESULT(InvalidNcaPatchInfoAesCtrExSize, 4525); - R_DEFINE_ERROR_RESULT(InvalidNcaPatchInfoAesCtrExOffset, 4526); - R_DEFINE_ERROR_RESULT(InvalidNcaId, 4527); - R_DEFINE_ERROR_RESULT(InvalidNcaHeader, 4528); - R_DEFINE_ERROR_RESULT(InvalidNcaFsHeader, 4529); + R_DEFINE_ERROR_RANGE(NcaFileSystemCorrupted, 4511, 4529); + R_DEFINE_ERROR_RESULT(InvalidNcaFileSystemType, 4512); + R_DEFINE_ERROR_RESULT(InvalidAcidFileSize, 4513); + R_DEFINE_ERROR_RESULT(InvalidAcidSize, 4514); + R_DEFINE_ERROR_RESULT(InvalidAcid, 4515); + R_DEFINE_ERROR_RESULT(AcidVerificationFailed, 4516); + R_DEFINE_ERROR_RESULT(InvalidNcaSignature, 4517); + R_DEFINE_ERROR_RESULT(NcaHeaderSignature1VerificationFailed, 4518); + R_DEFINE_ERROR_RESULT(NcaHeaderSignature2VerificationFailed, 4519); + R_DEFINE_ERROR_RESULT(NcaFsHeaderHashVerificationFailed, 4520); + R_DEFINE_ERROR_RESULT(InvalidNcaKeyIndex, 4521); + R_DEFINE_ERROR_RESULT(InvalidNcaFsHeaderHashType, 4522); + R_DEFINE_ERROR_RESULT(InvalidNcaFsHeaderEncryptionType, 4523); + R_DEFINE_ERROR_RESULT(InvalidNcaPatchInfoIndirectSize, 4524); + R_DEFINE_ERROR_RESULT(InvalidNcaPatchInfoAesCtrExSize, 4525); + R_DEFINE_ERROR_RESULT(InvalidNcaPatchInfoAesCtrExOffset, 4526); + R_DEFINE_ERROR_RESULT(InvalidNcaId, 4527); + R_DEFINE_ERROR_RESULT(InvalidNcaHeader, 4528); + R_DEFINE_ERROR_RESULT(InvalidNcaFsHeader, 4529); - R_DEFINE_ERROR_RANGE(NcaHierarchicalSha256StorageCorrupted, 4531, 4539); - R_DEFINE_ERROR_RESULT(InvalidHierarchicalSha256BlockSize, 4532); - R_DEFINE_ERROR_RESULT(InvalidHierarchicalSha256LayerCount, 4533); - R_DEFINE_ERROR_RESULT(HierarchicalSha256BaseStorageTooLarge, 4534); - R_DEFINE_ERROR_RESULT(HierarchicalSha256HashVerificationFailed, 4535); + R_DEFINE_ERROR_RANGE(NcaHierarchicalSha256StorageCorrupted, 4531, 4539); + R_DEFINE_ERROR_RESULT(InvalidHierarchicalSha256BlockSize, 4532); + R_DEFINE_ERROR_RESULT(InvalidHierarchicalSha256LayerCount, 4533); + R_DEFINE_ERROR_RESULT(HierarchicalSha256BaseStorageTooLarge, 4534); + R_DEFINE_ERROR_RESULT(HierarchicalSha256HashVerificationFailed, 4535); - /* TODO: Range? */ - R_DEFINE_ERROR_RESULT(InvalidNcaHeader1SignatureKeyGeneration, 4543); + /* TODO: Range? 
*/ + R_DEFINE_ERROR_RESULT(InvalidNcaHeader1SignatureKeyGeneration, 4543); - R_DEFINE_ERROR_RANGE(IntegrityVerificationStorageCorrupted, 4601, 4639); - R_DEFINE_ERROR_RESULT(IncorrectIntegrityVerificationMagic, 4602); - R_DEFINE_ERROR_RESULT(InvalidZeroHash, 4603); - R_DEFINE_ERROR_RESULT(NonRealDataVerificationFailed, 4604); - R_DEFINE_ERROR_RESULT(InvalidHierarchicalIntegrityVerificationLayerCount, 4605); + R_DEFINE_ERROR_RANGE(IntegrityVerificationStorageCorrupted, 4601, 4639); + R_DEFINE_ERROR_RESULT(IncorrectIntegrityVerificationMagic, 4602); + R_DEFINE_ERROR_RESULT(InvalidZeroHash, 4603); + R_DEFINE_ERROR_RESULT(NonRealDataVerificationFailed, 4604); + R_DEFINE_ERROR_RESULT(InvalidHierarchicalIntegrityVerificationLayerCount, 4605); - R_DEFINE_ERROR_RANGE(RealDataVerificationFailed, 4611, 4619); - R_DEFINE_ERROR_RESULT(ClearedRealDataVerificationFailed, 4612); - R_DEFINE_ERROR_RESULT(UnclearedRealDataVerificationFailed, 4613); + R_DEFINE_ERROR_RANGE(RealDataVerificationFailed, 4611, 4619); + R_DEFINE_ERROR_RESULT(ClearedRealDataVerificationFailed, 4612); + R_DEFINE_ERROR_RESULT(UnclearedRealDataVerificationFailed, 4613); - R_DEFINE_ERROR_RANGE(PartitionFileSystemCorrupted, 4641, 4659); - R_DEFINE_ERROR_RESULT(InvalidSha256PartitionHashTarget, 4642); - R_DEFINE_ERROR_RESULT(Sha256PartitionHashVerificationFailed, 4643); - R_DEFINE_ERROR_RESULT(PartitionSignatureVerificationFailed, 4644); - R_DEFINE_ERROR_RESULT(Sha256PartitionSignatureVerificationFailed, 4645); - R_DEFINE_ERROR_RESULT(InvalidPartitionEntryOffset, 4646); - R_DEFINE_ERROR_RESULT(InvalidSha256PartitionMetaDataSize, 4647); + R_DEFINE_ERROR_RANGE(PartitionFileSystemCorrupted, 4641, 4659); + R_DEFINE_ERROR_RESULT(InvalidSha256PartitionHashTarget, 4642); + R_DEFINE_ERROR_RESULT(Sha256PartitionHashVerificationFailed, 4643); + R_DEFINE_ERROR_RESULT(PartitionSignatureVerificationFailed, 4644); + R_DEFINE_ERROR_RESULT(Sha256PartitionSignatureVerificationFailed, 4645); + R_DEFINE_ERROR_RESULT(InvalidPartitionEntryOffset, 4646); + R_DEFINE_ERROR_RESULT(InvalidSha256PartitionMetaDataSize, 4647); - R_DEFINE_ERROR_RANGE(BuiltInStorageCorrupted, 4661, 4679); - R_DEFINE_ERROR_RESULT(GptHeaderVerificationFailed, 4662); + R_DEFINE_ERROR_RANGE(BuiltInStorageCorrupted, 4661, 4679); + R_DEFINE_ERROR_RESULT(GptHeaderVerificationFailed, 4662); - R_DEFINE_ERROR_RANGE(FatFileSystemCorrupted, 4681, 4699); + R_DEFINE_ERROR_RANGE(FatFileSystemCorrupted, 4681, 4699); - R_DEFINE_ERROR_RANGE(HostFileSystemCorrupted, 4701, 4719); - R_DEFINE_ERROR_RESULT(HostEntryCorrupted, 4702); - R_DEFINE_ERROR_RESULT(HostFileDataCorrupted, 4703); - R_DEFINE_ERROR_RESULT(HostFileCorrupted, 4704); - R_DEFINE_ERROR_RESULT(InvalidHostHandle, 4705); + R_DEFINE_ERROR_RANGE(HostFileSystemCorrupted, 4701, 4719); + R_DEFINE_ERROR_RESULT(HostEntryCorrupted, 4702); + R_DEFINE_ERROR_RESULT(HostFileDataCorrupted, 4703); + R_DEFINE_ERROR_RESULT(HostFileCorrupted, 4704); + R_DEFINE_ERROR_RESULT(InvalidHostHandle, 4705); - R_DEFINE_ERROR_RANGE(DatabaseCorrupted, 4721, 4739); - R_DEFINE_ERROR_RESULT(InvalidAllocationTableBlock, 4722); - R_DEFINE_ERROR_RESULT(InvalidKeyValueListElementIndex, 4723); + R_DEFINE_ERROR_RANGE(DatabaseCorrupted, 4721, 4739); + R_DEFINE_ERROR_RESULT(InvalidAllocationTableBlock, 4722); + R_DEFINE_ERROR_RESULT(InvalidKeyValueListElementIndex, 4723); - R_DEFINE_ERROR_RANGE(AesXtsFileSystemCorrupted, 4741, 4759); - R_DEFINE_ERROR_RANGE(SaveDataTransferDataCorrupted, 4761, 4769); - R_DEFINE_ERROR_RANGE(SignedSystemPartitionDataCorrupted, 4771, 4779); + 
R_DEFINE_ERROR_RANGE(AesXtsFileSystemCorrupted, 4741, 4759); + R_DEFINE_ERROR_RANGE(SaveDataTransferDataCorrupted, 4761, 4769); + R_DEFINE_ERROR_RANGE(SignedSystemPartitionDataCorrupted, 4771, 4779); - R_DEFINE_ERROR_RESULT(GameCardLogoDataCorrupted, 4781); + R_DEFINE_ERROR_RESULT(GameCardLogoDataCorrupted, 4781); - R_DEFINE_ERROR_RANGE(Unexpected, 5000, 5999); - R_DEFINE_ERROR_RESULT(UnexpectedInAesCtrStorageA, 5315); - R_DEFINE_ERROR_RESULT(UnexpectedInAesXtsStorageA, 5316); - R_DEFINE_ERROR_RESULT(UnexpectedInFindFileSystemA, 5319); + R_DEFINE_ERROR_RANGE(Unexpected, 5000, 5999); + R_DEFINE_ERROR_RESULT(UnexpectedInAesCtrStorageA, 5315); + R_DEFINE_ERROR_RESULT(UnexpectedInAesXtsStorageA, 5316); + R_DEFINE_ERROR_RESULT(UnexpectedInFindFileSystemA, 5319); - R_DEFINE_ERROR_RANGE(PreconditionViolation, 6000, 6499); - R_DEFINE_ERROR_RANGE(InvalidArgument, 6001, 6199); - R_DEFINE_ERROR_RANGE(InvalidPath, 6002, 6029); - R_DEFINE_ERROR_RESULT(TooLongPath, 6003); - R_DEFINE_ERROR_RESULT(InvalidCharacter, 6004); - R_DEFINE_ERROR_RESULT(InvalidPathFormat, 6005); - R_DEFINE_ERROR_RESULT(DirectoryUnobtainable, 6006); - R_DEFINE_ERROR_RESULT(NotNormalized, 6007); + R_DEFINE_ERROR_RANGE(PreconditionViolation, 6000, 6499); + R_DEFINE_ERROR_RANGE(InvalidArgument, 6001, 6199); + R_DEFINE_ERROR_RANGE(InvalidPath, 6002, 6029); + R_DEFINE_ERROR_RESULT(TooLongPath, 6003); + R_DEFINE_ERROR_RESULT(InvalidCharacter, 6004); + R_DEFINE_ERROR_RESULT(InvalidPathFormat, 6005); + R_DEFINE_ERROR_RESULT(DirectoryUnobtainable, 6006); + R_DEFINE_ERROR_RESULT(NotNormalized, 6007); - R_DEFINE_ERROR_RANGE(InvalidPathForOperation, 6030, 6059); - R_DEFINE_ERROR_RESULT(DirectoryNotDeletable, 6031); - R_DEFINE_ERROR_RESULT(DirectoryNotRenamable, 6032); - R_DEFINE_ERROR_RESULT(IncompatiblePath, 6033); - R_DEFINE_ERROR_RESULT(RenameToOtherFileSystem, 6034); + R_DEFINE_ERROR_RANGE(InvalidPathForOperation, 6030, 6059); + R_DEFINE_ERROR_RESULT(DirectoryNotDeletable, 6031); + R_DEFINE_ERROR_RESULT(DirectoryNotRenamable, 6032); + R_DEFINE_ERROR_RESULT(IncompatiblePath, 6033); + R_DEFINE_ERROR_RESULT(RenameToOtherFileSystem, 6034); - R_DEFINE_ERROR_RESULT(InvalidOffset, 6061); - R_DEFINE_ERROR_RESULT(InvalidSize, 6062); - R_DEFINE_ERROR_RESULT(NullptrArgument, 6063); - R_DEFINE_ERROR_RESULT(InvalidAlignment, 6064); - R_DEFINE_ERROR_RESULT(InvalidMountName, 6065); + R_DEFINE_ERROR_RESULT(InvalidOffset, 6061); + R_DEFINE_ERROR_RESULT(InvalidSize, 6062); + R_DEFINE_ERROR_RESULT(NullptrArgument, 6063); + R_DEFINE_ERROR_RESULT(InvalidAlignment, 6064); + R_DEFINE_ERROR_RESULT(InvalidMountName, 6065); - R_DEFINE_ERROR_RESULT(ExtensionSizeTooLarge, 6066); - R_DEFINE_ERROR_RESULT(ExtensionSizeInvalid, 6067); + R_DEFINE_ERROR_RESULT(ExtensionSizeTooLarge, 6066); + R_DEFINE_ERROR_RESULT(ExtensionSizeInvalid, 6067); - R_DEFINE_ERROR_RESULT(InvalidOpenMode, 6072); + R_DEFINE_ERROR_RESULT(InvalidOpenMode, 6072); - R_DEFINE_ERROR_RANGE(InvalidEnumValue, 6080, 6099); - R_DEFINE_ERROR_RESULT(InvalidSaveDataState, 6081); - R_DEFINE_ERROR_RESULT(InvalidSaveDataSpaceId, 6082); + R_DEFINE_ERROR_RANGE(InvalidEnumValue, 6080, 6099); + R_DEFINE_ERROR_RESULT(InvalidSaveDataState, 6081); + R_DEFINE_ERROR_RESULT(InvalidSaveDataSpaceId, 6082); - R_DEFINE_ERROR_RANGE(InvalidOperationForOpenMode, 6200, 6299); - R_DEFINE_ERROR_RESULT(FileExtensionWithoutOpenModeAllowAppend, 6201); - R_DEFINE_ERROR_RESULT(ReadNotPermitted, 6202); - R_DEFINE_ERROR_RESULT(WriteNotPermitted, 6203); + R_DEFINE_ERROR_RANGE(InvalidOperationForOpenMode, 6200, 6299); + 
R_DEFINE_ERROR_RESULT(FileExtensionWithoutOpenModeAllowAppend, 6201); + R_DEFINE_ERROR_RESULT(ReadNotPermitted, 6202); + R_DEFINE_ERROR_RESULT(WriteNotPermitted, 6203); - R_DEFINE_ERROR_RANGE(UnsupportedOperation, 6300, 6399); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInSubStorageA, 6302); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInSubStorageB, 6303); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInMemoryStorageA, 6304); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInMemoryStorageB, 6305); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInFileStorageA, 6306); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInFileStorageB, 6307); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInSwitchStorageA, 6308); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrCounterExtendedStorageA, 6310); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrCounterExtendedStorageB, 6311); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrCounterExtendedStorageC, 6312); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrStorageExternalA, 6313); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrStorageExternalB, 6314); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrStorageA, 6315); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInHierarchicalIntegrityVerificationStorageA, 6316); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInHierarchicalIntegrityVerificationStorageB, 6317); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityVerificationStorageA, 6318); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityVerificationStorageB, 6319); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityVerificationStorageC, 6320); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInBlockCacheBufferedStorageA, 6321); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInBlockCacheBufferedStorageB, 6322); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInBlockCacheBufferedStorageC, 6323); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInIndirectStorageA, 6324); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInIndirectStorageB, 6325); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInIndirectStorageC, 6326); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInZeroStorageA, 6327); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInZeroStorageB, 6328); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInHierarchicalSha256StorageA, 6329); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyBlockCacheStorageA, 6330); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyBlockCacheStorageB, 6331); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityRomFsStorageA , 6332); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInFileServiceObjectAdapterA, 6362); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileSystemA, 6364); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileSystemB, 6365); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileSystemC, 6366); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileA, 6367); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileB, 6368); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileSystemTemplateA, 6369); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileSystemTemplateB, 6370); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileSystemTemplateC, 6371); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileA, 6372); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileB, 6373); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileSystemA, 6374); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileSystemB, 6375); - R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileA, 6376); 
- R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileB, 6377); + R_DEFINE_ERROR_RANGE(UnsupportedOperation, 6300, 6399); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInSubStorageA, 6302); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInSubStorageB, 6303); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInMemoryStorageA, 6304); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInMemoryStorageB, 6305); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInFileStorageA, 6306); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInFileStorageB, 6307); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInSwitchStorageA, 6308); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrCounterExtendedStorageA, 6310); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrCounterExtendedStorageB, 6311); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrCounterExtendedStorageC, 6312); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrStorageExternalA, 6313); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrStorageExternalB, 6314); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInAesCtrStorageA, 6315); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInHierarchicalIntegrityVerificationStorageA, 6316); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInHierarchicalIntegrityVerificationStorageB, 6317); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityVerificationStorageA, 6318); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityVerificationStorageB, 6319); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityVerificationStorageC, 6320); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInBlockCacheBufferedStorageA, 6321); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInBlockCacheBufferedStorageB, 6322); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInBlockCacheBufferedStorageC, 6323); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInIndirectStorageA, 6324); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInIndirectStorageB, 6325); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInIndirectStorageC, 6326); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInZeroStorageA, 6327); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInZeroStorageB, 6328); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInHierarchicalSha256StorageA, 6329); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyBlockCacheStorageA, 6330); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyBlockCacheStorageB, 6331); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInIntegrityRomFsStorageA , 6332); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInFileServiceObjectAdapterA, 6362); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileSystemA, 6364); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileSystemB, 6365); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileSystemC, 6366); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileA, 6367); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInRomFsFileB, 6368); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileSystemTemplateA, 6369); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileSystemTemplateB, 6370); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileSystemTemplateC, 6371); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileA, 6372); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInReadOnlyFileB, 6373); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileSystemA, 6374); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileSystemB, 6375); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileA, 6376); + R_DEFINE_ERROR_RESULT(UnsupportedOperationInPartitionFileB, 6377); - 
R_DEFINE_ERROR_RANGE(PermissionDenied, 6400, 6449); + R_DEFINE_ERROR_RANGE(PermissionDenied, 6400, 6449); - R_DEFINE_ERROR_RESULT(NeedFlush, 6454); - R_DEFINE_ERROR_RESULT(FileNotClosed, 6455); - R_DEFINE_ERROR_RESULT(DirectoryNotClosed, 6456); - R_DEFINE_ERROR_RESULT(WriteModeFileNotClosed, 6457); - R_DEFINE_ERROR_RESULT(AllocatorAlreadyRegistered, 6458); - R_DEFINE_ERROR_RESULT(DefaultAllocatorUsed, 6459); - R_DEFINE_ERROR_RESULT(AllocatorAlignmentViolation, 6461); - R_DEFINE_ERROR_RESULT(UserNotExist, 6465); + R_DEFINE_ERROR_RESULT(NeedFlush, 6454); + R_DEFINE_ERROR_RESULT(FileNotClosed, 6455); + R_DEFINE_ERROR_RESULT(DirectoryNotClosed, 6456); + R_DEFINE_ERROR_RESULT(WriteModeFileNotClosed, 6457); + R_DEFINE_ERROR_RESULT(AllocatorAlreadyRegistered, 6458); + R_DEFINE_ERROR_RESULT(DefaultAllocatorUsed, 6459); + R_DEFINE_ERROR_RESULT(AllocatorAlignmentViolation, 6461); + R_DEFINE_ERROR_RESULT(UserNotExist, 6465); - R_DEFINE_ERROR_RANGE(NotFound, 6600, 6699); + R_DEFINE_ERROR_RANGE(NotFound, 6600, 6699); - R_DEFINE_ERROR_RANGE(OutOfResource, 6700, 6799); - R_DEFINE_ERROR_RESULT(BufferAllocationFailed, 6705); - R_DEFINE_ERROR_RESULT(MappingTableFull, 6706); - R_DEFINE_ERROR_RESULT(OpenCountLimit, 6709); + R_DEFINE_ERROR_RANGE(OutOfResource, 6700, 6799); + R_DEFINE_ERROR_RESULT(BufferAllocationFailed, 6705); + R_DEFINE_ERROR_RESULT(MappingTableFull, 6706); + R_DEFINE_ERROR_RESULT(OpenCountLimit, 6709); - R_DEFINE_ERROR_RANGE(MappingFailed, 6800, 6899); - R_DEFINE_ERROR_RESULT(MapFull, 6811); + R_DEFINE_ERROR_RANGE(MappingFailed, 6800, 6899); + R_DEFINE_ERROR_RESULT(MapFull, 6811); - R_DEFINE_ERROR_RANGE(BadState, 6900, 6999); - R_DEFINE_ERROR_RESULT(NotInitialized, 6902); - R_DEFINE_ERROR_RESULT(NotMounted, 6905); + R_DEFINE_ERROR_RANGE(BadState, 6900, 6999); + R_DEFINE_ERROR_RESULT(NotInitialized, 6902); + R_DEFINE_ERROR_RESULT(NotMounted, 6905); - R_DEFINE_ERROR_RANGE(DbmNotFound, 7901, 7904); - R_DEFINE_ERROR_RESULT(DbmKeyNotFound, 7902); - R_DEFINE_ERROR_RESULT(DbmFileNotFound, 7903); - R_DEFINE_ERROR_RESULT(DbmDirectoryNotFound, 7904); + R_DEFINE_ERROR_RANGE(DbmNotFound, 7901, 7904); + R_DEFINE_ERROR_RESULT(DbmKeyNotFound, 7902); + R_DEFINE_ERROR_RESULT(DbmFileNotFound, 7903); + R_DEFINE_ERROR_RESULT(DbmDirectoryNotFound, 7904); - R_DEFINE_ERROR_RESULT(DbmAlreadyExists, 7906); - R_DEFINE_ERROR_RESULT(DbmKeyFull, 7907); - R_DEFINE_ERROR_RESULT(DbmDirectoryEntryFull, 7908); - R_DEFINE_ERROR_RESULT(DbmFileEntryFull, 7909); + R_DEFINE_ERROR_RESULT(DbmAlreadyExists, 7906); + R_DEFINE_ERROR_RESULT(DbmKeyFull, 7907); + R_DEFINE_ERROR_RESULT(DbmDirectoryEntryFull, 7908); + R_DEFINE_ERROR_RESULT(DbmFileEntryFull, 7909); - R_DEFINE_ERROR_RANGE(DbmFindFinished, 7910, 7912); - R_DEFINE_ERROR_RESULT(DbmFindKeyFinished, 7911); - R_DEFINE_ERROR_RESULT(DbmIterationFinished, 7912); + R_DEFINE_ERROR_RANGE(DbmFindFinished, 7910, 7912); + R_DEFINE_ERROR_RESULT(DbmFindKeyFinished, 7911); + R_DEFINE_ERROR_RESULT(DbmIterationFinished, 7912); - R_DEFINE_ERROR_RESULT(DbmInvalidOperation, 7914); - R_DEFINE_ERROR_RESULT(DbmInvalidPathFormat, 7915); - R_DEFINE_ERROR_RESULT(DbmDirectoryNameTooLong, 7916); - R_DEFINE_ERROR_RESULT(DbmFileNameTooLong, 7917); + R_DEFINE_ERROR_RESULT(DbmInvalidOperation, 7914); + R_DEFINE_ERROR_RESULT(DbmInvalidPathFormat, 7915); + R_DEFINE_ERROR_RESULT(DbmDirectoryNameTooLong, 7916); + R_DEFINE_ERROR_RESULT(DbmFileNameTooLong, 7917); } diff --git a/libraries/libvapours/include/vapours/results/nim_results.hpp b/libraries/libvapours/include/vapours/results/nim_results.hpp new file mode 
100644 index 000000000..c0607726c --- /dev/null +++ b/libraries/libvapours/include/vapours/results/nim_results.hpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include + +namespace ams::nim { + + R_DEFINE_NAMESPACE_RESULT_MODULE(137); + + R_DEFINE_ERROR_RESULT(HttpConnectionCanceled, 70); + +} diff --git a/libraries/libvapours/include/vapours/results/ns_results.hpp b/libraries/libvapours/include/vapours/results/ns_results.hpp new file mode 100644 index 000000000..32f954225 --- /dev/null +++ b/libraries/libvapours/include/vapours/results/ns_results.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include + +namespace ams::ns { + + R_DEFINE_NAMESPACE_RESULT_MODULE(16); + + R_DEFINE_ERROR_RESULT(Canceled, 90); + R_DEFINE_ERROR_RESULT(OutOfMaxRunningTask, 110); + R_DEFINE_ERROR_RESULT(CardUpdateNotSetup, 270); + R_DEFINE_ERROR_RESULT(CardUpdateNotPrepared, 280); + R_DEFINE_ERROR_RESULT(CardUpdateAlreadySetup, 290); + R_DEFINE_ERROR_RESULT(PrepareCardUpdateAlreadyRequested, 460); + +} diff --git a/libraries/libvapours/include/vapours/results/os_results.hpp b/libraries/libvapours/include/vapours/results/os_results.hpp index 742b12c6d..05479e26d 100644 --- a/libraries/libvapours/include/vapours/results/os_results.hpp +++ b/libraries/libvapours/include/vapours/results/os_results.hpp @@ -21,12 +21,20 @@ namespace ams::os { R_DEFINE_NAMESPACE_RESULT_MODULE(3); - R_DEFINE_ERROR_RESULT(Busy, 4); + R_DEFINE_ERROR_RESULT(Busy, 4); - R_DEFINE_ERROR_RESULT(OutOfMemory, 8); - R_DEFINE_ERROR_RESULT(OutOfResource, 9); + R_DEFINE_ERROR_RESULT(OutOfMemory, 8); + R_DEFINE_ERROR_RESULT(OutOfResource, 9); - R_DEFINE_ERROR_RESULT(OutOfVirtualAddressSpace, 12); - R_DEFINE_ERROR_RESULT(ResourceLimit, 13); + R_DEFINE_ERROR_RESULT(OutOfVirtualAddressSpace, 12); + R_DEFINE_ERROR_RESULT(ResourceLimit, 13); + + R_DEFINE_ERROR_RESULT(OutOfHandles, 500); + R_DEFINE_ERROR_RESULT(InvalidHandle, 501); + R_DEFINE_ERROR_RESULT(InvalidCurrentMemoryState, 502); + R_DEFINE_ERROR_RESULT(InvalidTransferMemoryState, 503); + R_DEFINE_ERROR_RESULT(InvalidTransferMemorySize, 504); + R_DEFINE_ERROR_RESULT(OutOfTransferMemory, 505); + R_DEFINE_ERROR_RESULT(OutOfAddressSpace, 506); } diff --git a/libraries/libvapours/include/vapours/results/spl_results.hpp b/libraries/libvapours/include/vapours/results/spl_results.hpp index 8ea794825..f8d857e19 100644 --- a/libraries/libvapours/include/vapours/results/spl_results.hpp +++ b/libraries/libvapours/include/vapours/results/spl_results.hpp @@ -28,13 +28,14 @@ namespace ams::spl { R_DEFINE_ERROR_RESULT(SecureMonitorNoAsyncOperation, 4); R_DEFINE_ERROR_RESULT(SecureMonitorInvalidAsyncOperation, 5); R_DEFINE_ERROR_RESULT(SecureMonitorNotPermitted, 6); + R_DEFINE_ERROR_RESULT(SecureMonitorNotInitialized, 7); R_DEFINE_ERROR_RESULT(InvalidSize, 100); R_DEFINE_ERROR_RESULT(UnknownSecureMonitorError, 101); R_DEFINE_ERROR_RESULT(DecryptionFailed, 102); - R_DEFINE_ERROR_RESULT(OutOfKeyslots, 104); - R_DEFINE_ERROR_RESULT(InvalidKeyslot, 105); + R_DEFINE_ERROR_RESULT(OutOfKeySlots, 104); + R_DEFINE_ERROR_RESULT(InvalidKeySlot, 105); R_DEFINE_ERROR_RESULT(BootReasonAlreadySet, 106); R_DEFINE_ERROR_RESULT(BootReasonNotSet, 107); R_DEFINE_ERROR_RESULT(InvalidArgument, 108); diff --git a/libraries/libvapours/include/vapours/results/svc_results.hpp b/libraries/libvapours/include/vapours/results/svc_results.hpp index 31672c97e..e73beb865 100644 --- a/libraries/libvapours/include/vapours/results/svc_results.hpp +++ b/libraries/libvapours/include/vapours/results/svc_results.hpp @@ -27,6 +27,8 @@ namespace ams::svc { R_DEFINE_ERROR_RESULT(NotImplemented, 33); + R_DEFINE_ERROR_RESULT(StopProcessingException, 54); + R_DEFINE_ERROR_RESULT(NoSynchronizationObject, 57); R_DEFINE_ERROR_RESULT(TerminationRequested, 59); @@ -40,7 +42,7 @@ namespace ams::svc { R_DEFINE_ERROR_RESULT(OutOfHandles, 105); R_DEFINE_ERROR_RESULT(InvalidCurrentMemory, 106); - R_DEFINE_ERROR_RESULT(InvalidNewMemoryPermissions, 108); + R_DEFINE_ERROR_RESULT(InvalidNewMemoryPermission, 108); R_DEFINE_ERROR_RESULT(InvalidMemoryRegion, 110); @@ -61,15 +63,19 @@ namespace ams::svc { R_DEFINE_ERROR_RESULT(ReservedUsed, 
126); R_DEFINE_ERROR_RESULT(NotSupported, 127); R_DEFINE_ERROR_RESULT(Debug, 128); - R_DEFINE_ERROR_RESULT(ThreadNotOwned, 129); - + R_DEFINE_ERROR_RESULT(NoThread, 129); + R_DEFINE_ERROR_RESULT(UnknownThread, 130); R_DEFINE_ERROR_RESULT(PortClosed, 131); R_DEFINE_ERROR_RESULT(LimitReached, 132); + R_DEFINE_ERROR_RESULT(InvalidMemoryPool, 133); R_DEFINE_ERROR_RESULT(ReceiveListBroken, 258); R_DEFINE_ERROR_RESULT(OutOfAddressSpace, 259); R_DEFINE_ERROR_RESULT(MessageTooLarge, 260); + R_DEFINE_ERROR_RESULT(InvalidProcessId, 517); + R_DEFINE_ERROR_RESULT(InvalidThreadId, 518); + R_DEFINE_ERROR_RESULT(InvalidId, 519); R_DEFINE_ERROR_RESULT(ProcessTerminated, 520); } diff --git a/libraries/libvapours/include/vapours/svc/codegen/impl/svc_codegen_impl_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/impl/svc_codegen_impl_kernel_svc_wrapper.hpp index 6872199b1..de6d25b32 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/impl/svc_codegen_impl_kernel_svc_wrapper.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/impl/svc_codegen_impl_kernel_svc_wrapper.hpp @@ -514,7 +514,8 @@ namespace ams::svc::codegen::impl { private: using Traits = FunctionTraits; public: - using Impl = KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, typename Traits::ReturnType, typename Traits::ArgsType>; + using Impl = KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, typename Traits::ReturnType, typename Traits::ArgsType>; + using ReturnType = typename Traits::ReturnType; static constexpr bool IsAarch64Kernel = std::is_same<_KernelAbiType, Aarch64Lp64Abi>::value; static constexpr bool IsAarch32Kernel = std::is_same<_KernelAbiType, Aarch32Ilp32Abi>::value; @@ -525,17 +526,16 @@ namespace ams::svc::codegen::impl { static constexpr auto BeforeMetaCode = Impl::OptimizedBeforeMetaCode; static constexpr auto AfterMetaCode = Impl::OptimizedAfterMetaCode; - /* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */ #pragma GCC push_options #pragma GCC optimize ("omit-frame-pointer") - static ALWAYS_INLINE void WrapSvcFunction() { + static ALWAYS_INLINE ReturnType WrapSvcFunction() { /* Generate appropriate assembly. 
*/ GenerateCodeForMetaCode(); ON_SCOPE_EXIT { GenerateCodeForMetaCode(); }; - return reinterpret_cast(Function)(); + return reinterpret_cast(Function)(); } #pragma GCC pop_options diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp index bce731284..aa650250d 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp @@ -20,7 +20,7 @@ namespace ams::svc::codegen { #if defined(ATMOSPHERE_ARCH_ARM64) || defined(ATMOSPHERE_ARCH_ARM) - template + template class KernelSvcWrapper { private: /* TODO: using Aarch32 = */ @@ -32,11 +32,22 @@ namespace ams::svc::codegen { #pragma GCC optimize ("omit-frame-pointer") static ALWAYS_INLINE void Call64() { - Aarch64::WrapSvcFunction(); + if constexpr (std::is_same::value) { + Aarch64::WrapSvcFunction(); + } else { + const auto &res = Aarch64::WrapSvcFunction(); + __asm__ __volatile__("" :: [res]"r"(res)); + } + } static ALWAYS_INLINE void Call64From32() { - Aarch64From32::WrapSvcFunction(); + if constexpr (std::is_same::value) { + Aarch64From32::WrapSvcFunction(); + } else { + const auto &res = Aarch64From32::WrapSvcFunction(); + __asm__ __volatile__("" :: [res]"r"(res)); + } } #pragma GCC pop_options diff --git a/libraries/libvapours/include/vapours/svc/ipc/svc_message_buffer.hpp b/libraries/libvapours/include/vapours/svc/ipc/svc_message_buffer.hpp index 1c379ca74..af3b3cb5c 100644 --- a/libraries/libvapours/include/vapours/svc/ipc/svc_message_buffer.hpp +++ b/libraries/libvapours/include/vapours/svc/ipc/svc_message_buffer.hpp @@ -159,11 +159,11 @@ namespace ams::svc::ipc { return this->header.Get(); } - constexpr ALWAYS_INLINE bool GetCopyHandleCount() const { + constexpr ALWAYS_INLINE s32 GetCopyHandleCount() const { return this->header.Get(); } - constexpr ALWAYS_INLINE bool GetMoveHandleCount() const { + constexpr ALWAYS_INLINE s32 GetMoveHandleCount() const { return this->header.Get(); } @@ -351,7 +351,7 @@ namespace ams::svc::ipc { ALWAYS_INLINE ReceiveListEntry(u32 a, u32 b) : data{util::BitPack32{a}, util::BitPack32{b}} { /* ... 
*/ } - constexpr ALWAYS_INLINE uintptr_t GetAddress() { + constexpr ALWAYS_INLINE uintptr_t GetAddress() const { const u64 address = (static_cast(this->data[1].Get()) << AddressLow::Count) | this->data[0].Get(); return address; } @@ -439,34 +439,34 @@ namespace ams::svc::ipc { return index + (spc.GetHeaderSize() / sizeof(*this->buffer)); } - ALWAYS_INLINE s32 SetHandle(s32 index, const ::ams::svc::Handle &hnd) { + ALWAYS_INLINE s32 SetHandle(s32 index, const ::ams::svc::Handle &hnd) const { static_assert(util::IsAligned(sizeof(hnd), sizeof(*this->buffer))); __builtin_memcpy(this->buffer + index, std::addressof(hnd), sizeof(hnd)); return index + (sizeof(hnd) / sizeof(*this->buffer)); } - ALWAYS_INLINE s32 SetProcessId(s32 index, const u64 pid) { + ALWAYS_INLINE s32 SetProcessId(s32 index, const u64 pid) const { static_assert(util::IsAligned(sizeof(pid), sizeof(*this->buffer))); __builtin_memcpy(this->buffer + index, std::addressof(pid), sizeof(pid)); return index + (sizeof(pid) / sizeof(*this->buffer)); } - ALWAYS_INLINE s32 Set(s32 index, const MapAliasDescriptor &desc) { + ALWAYS_INLINE s32 Set(s32 index, const MapAliasDescriptor &desc) const { __builtin_memcpy(this->buffer + index, desc.GetData(), desc.GetDataSize()); return index + (desc.GetDataSize() / sizeof(*this->buffer)); } - ALWAYS_INLINE s32 Set(s32 index, const PointerDescriptor &desc) { + ALWAYS_INLINE s32 Set(s32 index, const PointerDescriptor &desc) const { __builtin_memcpy(this->buffer + index, desc.GetData(), desc.GetDataSize()); return index + (desc.GetDataSize() / sizeof(*this->buffer)); } - ALWAYS_INLINE s32 Set(s32 index, const ReceiveListEntry &desc) { + ALWAYS_INLINE s32 Set(s32 index, const ReceiveListEntry &desc) const { __builtin_memcpy(this->buffer + index, desc.GetData(), desc.GetDataSize()); return index + (desc.GetDataSize() / sizeof(*this->buffer)); } - ALWAYS_INLINE s32 Set(s32 index, const u32 val) { + ALWAYS_INLINE s32 Set(s32 index, const u32 val) const { static_assert(util::IsAligned(sizeof(val), sizeof(*this->buffer))); __builtin_memcpy(this->buffer + index, std::addressof(val), sizeof(val)); return index + (sizeof(val) / sizeof(*this->buffer)); @@ -521,7 +521,7 @@ namespace ams::svc::ipc { } } - static constexpr ALWAYS_INLINE s32 GetMessageBufferSize(const MessageHeader &hdr, const SpecialHeader &spc) { + static constexpr ALWAYS_INLINE size_t GetMessageBufferSize(const MessageHeader &hdr, const SpecialHeader &spc) { /* Get the size of the plain message. 
*/ size_t msg_size = GetReceiveListIndex(hdr, spc) * sizeof(util::BitPack32); diff --git a/libraries/libvapours/include/vapours/svc/svc_common.hpp b/libraries/libvapours/include/vapours/svc/svc_common.hpp index 9e4fc430d..9f68ad73d 100644 --- a/libraries/libvapours/include/vapours/svc/svc_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_common.hpp @@ -32,7 +32,7 @@ namespace ams::svc { HandleWaitMask = (1u << 30), }; - constexpr inline size_t MaxWaitSynchronizationHandleCount = 0x40; + constexpr inline s32 ArgumentHandleCountMax = 0x40; constexpr inline s64 WaitInfinite = -1; diff --git a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp index dee20ba88..558051a49 100644 --- a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp @@ -52,7 +52,7 @@ HANDLER(0x15, Result, CreateTransferMemory, OUTPUT(::ams::svc::Handle, out_handle), INPUT(::ams::svc::Address, address), INPUT(::ams::svc::Size, size), INPUT(::ams::svc::MemoryPermission, map_perm)) \ HANDLER(0x16, Result, CloseHandle, INPUT(::ams::svc::Handle, handle)) \ HANDLER(0x17, Result, ResetSignal, INPUT(::ams::svc::Handle, handle)) \ - HANDLER(0x18, Result, WaitSynchronization, OUTPUT(int32_t, out_index), INPTR(::ams::svc::Handle, handles), INPUT(int32_t, numHandles), INPUT(int64_t, timeout_ns)) \ + HANDLER(0x18, Result, WaitSynchronization, OUTPUT(int32_t, out_index), INPTR(::ams::svc::Handle, handles), INPUT(int32_t, num_handles), INPUT(int64_t, timeout_ns)) \ HANDLER(0x19, Result, CancelSynchronization, INPUT(::ams::svc::Handle, handle)) \ HANDLER(0x1A, Result, ArbitrateLock, INPUT(::ams::svc::Handle, thread_handle), INPUT(::ams::svc::Address, address), INPUT(uint32_t, tag)) \ HANDLER(0x1B, Result, ArbitrateUnlock, INPUT(::ams::svc::Address, address)) \ @@ -107,7 +107,7 @@ HANDLER(0x52, Result, UnmapTransferMemory, INPUT(::ams::svc::Handle, trmem_handle), INPUT(::ams::svc::Address, address), INPUT(::ams::svc::Size, size)) \ HANDLER(0x53, Result, CreateInterruptEvent, OUTPUT(::ams::svc::Handle, out_read_handle), INPUT(int32_t, interrupt_id), INPUT(::ams::svc::InterruptType, interrupt_type)) \ HANDLER(0x54, Result, QueryPhysicalAddress, OUTPUT(::ams::svc::NAMESPACE::PhysicalMemoryInfo, out_info), INPUT(::ams::svc::Address, address)) \ - HANDLER(0x55, Result, QueryIoMapping, OUTPUT(::ams::svc::Address, out_address), INPUT(::ams::svc::PhysicalAddress, physical_address), INPUT(::ams::svc::Size, size)) \ + HANDLER(0x55, Result, QueryIoMapping, OUTPUT(::ams::svc::Address, out_address), OUTPUT(::ams::svc::Size, out_size), INPUT(::ams::svc::PhysicalAddress, physical_address), INPUT(::ams::svc::Size, size)) \ HANDLER(0x56, Result, CreateDeviceAddressSpace, OUTPUT(::ams::svc::Handle, out_handle), INPUT(uint64_t, das_address), INPUT(uint64_t, das_size)) \ HANDLER(0x57, Result, AttachDeviceAddressSpace, INPUT(::ams::svc::DeviceName, device_name), INPUT(::ams::svc::Handle, das_handle)) \ HANDLER(0x58, Result, DetachDeviceAddressSpace, INPUT(::ams::svc::DeviceName, device_name), INPUT(::ams::svc::Handle, das_handle)) \ @@ -149,7 +149,10 @@ HANDLER(0x7C, Result, GetProcessInfo, OUTPUT(int64_t, out_info), INPUT(::ams::svc::Handle, process_handle), INPUT(::ams::svc::ProcessInfoType, info_type)) \ HANDLER(0x7D, Result, CreateResourceLimit, OUTPUT(::ams::svc::Handle, out_handle)) \ HANDLER(0x7E, Result, SetResourceLimitLimitValue, INPUT(::ams::svc::Handle, resource_limit_handle), 
INPUT(::ams::svc::LimitableResource, which), INPUT(int64_t, limit_value)) \ - HANDLER(0x7F, void, CallSecureMonitor, OUTPUT(::ams::svc::NAMESPACE::SecureMonitorArguments, args)) + HANDLER(0x7F, void, CallSecureMonitor, OUTPUT(::ams::svc::NAMESPACE::SecureMonitorArguments, args)) \ + \ + HANDLER(0x55, Result, LegacyQueryIoMapping, OUTPUT(::ams::svc::Address, out_address), INPUT(::ams::svc::PhysicalAddress, physical_address), INPUT(::ams::svc::Size, size)) \ + HANDLER(0x64, Result, LegacyContinueDebugEvent, INPUT(::ams::svc::Handle, debug_handle), INPUT(uint32_t, flags), INPUT(uint64_t, thread_id)) #define AMS_SVC_FOREACH_USER_DEFINITION(HANDLER, NAMESPACE) AMS_SVC_FOREACH_DEFINITION_IMPL(HANDLER, NAMESPACE, AMS_SVC_USER_INPUT_HANDLER, AMS_SVC_USER_OUTPUT_HANDLER, AMS_SVC_USER_INPTR_HANDLER, AMS_SVC_USER_OUTPTR_HANDLER) #define AMS_SVC_FOREACH_KERN_DEFINITION(HANDLER, NAMESPACE) AMS_SVC_FOREACH_DEFINITION_IMPL(HANDLER, NAMESPACE, AMS_SVC_KERN_INPUT_HANDLER, AMS_SVC_KERN_OUTPUT_HANDLER, AMS_SVC_KERN_INPTR_HANDLER, AMS_SVC_KERN_OUTPTR_HANDLER) @@ -157,6 +160,19 @@ #define AMS_SVC_DECLARE_FUNCTION_PROTOTYPE(ID, RETURN_TYPE, NAME, ...) \ RETURN_TYPE NAME(__VA_ARGS__); +namespace ams::svc { + + #define AMS_SVC_DEFINE_ID_ENUM_MEMBER(ID, RETURN_TYPE, NAME, ...) \ + SvcId_##NAME = ID, + + enum SvcId : u32 { + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_SVC_DEFINE_ID_ENUM_MEMBER, _) + }; + + #undef AMS_SVC_DEFINE_ID_ENUM_MEMBER + +} + #ifdef ATMOSPHERE_IS_STRATOSPHERE namespace ams::svc { diff --git a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp index 8e7a5049e..4b8260b7d 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp @@ -116,10 +116,20 @@ namespace ams::svc { MemoryAttribute_Uncached = (1 << 3), }; + constexpr inline size_t HeapSizeAlignment = 2_MB; + struct PageInfo { u32 flags; }; + enum MemoryRegionType { + MemoryRegionType_None = 0, + MemoryRegionType_KernelTraceBuffer = 1, + MemoryRegionType_OnMemoryBootImage = 2, + MemoryRegionType_DTB = 3, + MemoryRegionType_Count, + }; + /* Info Types. */ enum InfoType : u32 { InfoType_CoreMask = 0, @@ -178,7 +188,7 @@ namespace ams::svc { }; enum LastThreadInfoFlag : u32 { - /* TODO */ + LastThreadInfoFlag_ThreadInSystemCall = (1u << 0), }; enum LimitableResource : u32 { @@ -282,6 +292,15 @@ namespace ams::svc { ThreadContextFlag_All = (ThreadContextFlag_General | ThreadContextFlag_Control | ThreadContextFlag_Fpu | ThreadContextFlag_FpuControl), }; + enum ContinueFlag : u32 { + ContinueFlag_ExceptionHandled = (1u << 0), + ContinueFlag_EnableExceptionEvent = (1u << 1), + ContinueFlag_ContinueAll = (1u << 2), + ContinueFlag_ContinueOthers = (1u << 3), + + ContinueFlag_AllMask = (1u << 4) - 1, + }; + enum ThreadExitReason : u32 { ThreadExitReason_ExitThread = 0, ThreadExitReason_TerminateThread = 1, @@ -364,12 +383,21 @@ namespace ams::svc { /* 7.x+ Should memory allocation be optimized? This requires IsApplication. */ CreateProcessFlag_OptimizeMemoryAllocation = (1 << 11), + + /* Mask of all flags. */ + CreateProcessFlag_All = CreateProcessFlag_Is64Bit | + CreateProcessFlag_AddressSpaceMask | + CreateProcessFlag_EnableDebug | + CreateProcessFlag_EnableAslr | + CreateProcessFlag_IsApplication | + CreateProcessFlag_PoolPartitionMask | + CreateProcessFlag_OptimizeMemoryAllocation, }; /* Debug types. 
*/ enum DebugEvent : u32 { - DebugEvent_AttachProcess = 0, - DebugEvent_AttachThread = 1, + DebugEvent_CreateProcess = 0, + DebugEvent_CreateThread = 1, DebugEvent_ExitProcess = 2, DebugEvent_ExitThread = 3, DebugEvent_Exception = 4, @@ -396,6 +424,10 @@ namespace ams::svc { DebugException_MemorySystemError = 9, }; + enum DebugEventFlag : u32 { + DebugEventFlag_Stopped = (1u << 0), + }; + enum ExceptionType : u32 { ExceptionType_Init = 0x000, ExceptionType_InstructionAbort = 0x100, @@ -413,7 +445,16 @@ namespace ams::svc { }; enum BreakReason : u32 { - /* TODO */ + BreakReason_Panic = 0, + BreakReason_Assert = 1, + BreakReason_User = 2, + BreakReason_PreLoadDll = 3, + BreakReason_PostLoadDll = 4, + BreakReason_PreUnloadDll = 5, + BreakReason_PostUnloadDll = 6, + BreakReason_CppException = 7, + + BreakReason_NotificationOnlyFlag = 0x80000000, }; enum KernelDebugType : u32 { @@ -507,6 +548,7 @@ namespace ams::svc { struct ExceptionInfoStatus64 { u32 pstate; u32 afsr0; + u32 afsr1; u32 esr; u32 far; }; diff --git a/libraries/libvapours/include/vapours/svc/svc_types_dmnt.hpp b/libraries/libvapours/include/vapours/svc/svc_types_dmnt.hpp index da6145392..e65ab27c8 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_dmnt.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_dmnt.hpp @@ -20,7 +20,7 @@ namespace ams::svc { namespace lp64 { - struct DebugInfoAttachProcess { + struct DebugInfoCreateProcess { u64 program_id; u64 process_id; char name[0xC]; @@ -28,7 +28,7 @@ namespace ams::svc { u64 user_exception_context_address; /* 5.0.0+ */ }; - struct DebugInfoAttachThread { + struct DebugInfoCreateThread { u64 thread_id; u64 tls_address; u64 entrypoint; @@ -91,8 +91,8 @@ namespace ams::svc { }; union DebugInfo { - DebugInfoAttachProcess attach_process; - DebugInfoAttachThread attach_thread; + DebugInfoCreateProcess create_process; + DebugInfoCreateThread create_thread; DebugInfoExitProcess exit_process; DebugInfoExitThread exit_thread; DebugInfoException exception; @@ -110,7 +110,7 @@ namespace ams::svc { namespace ilp32 { - struct DebugInfoAttachProcess { + struct DebugInfoCreateProcess { u64 program_id; u64 process_id; char name[0xC]; @@ -118,7 +118,7 @@ namespace ams::svc { u32 user_exception_context_address; /* 5.0.0+ */ }; - struct DebugInfoAttachThread { + struct DebugInfoCreateThread { u64 thread_id; u32 tls_address; u32 entrypoint; @@ -181,8 +181,8 @@ namespace ams::svc { }; union DebugInfo { - DebugInfoAttachProcess attach_process; - DebugInfoAttachThread attach_thread; + DebugInfoCreateProcess create_process; + DebugInfoCreateThread create_thread; DebugInfoExitProcess exit_process; DebugInfoExitThread exit_thread; DebugInfoException exception; diff --git a/libraries/libvapours/include/vapours/svc/svc_types_priv.hpp b/libraries/libvapours/include/vapours/svc/svc_types_priv.hpp index f6da8637b..3e2e11758 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_priv.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_priv.hpp @@ -25,10 +25,10 @@ namespace ams::svc { u32 version; u64 program_id; u64 code_address; - u32 code_num_pages; + s32 code_num_pages; u32 flags; Handle reslimit; - u32 system_resource_num_pages; + s32 system_resource_num_pages; }; static_assert(sizeof(CreateProcessParameter) == 0x30); @@ -41,10 +41,10 @@ namespace ams::svc { u32 version; u64 program_id; u64 code_address; - u32 code_num_pages; + s32 code_num_pages; u32 flags; Handle reslimit; - u32 system_resource_num_pages; + s32 system_resource_num_pages; }; 
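Two of the new masks above are simple closed-form values: ContinueFlag_AllMask = (1u << 4) - 1 = 0xF is just the union of the four ContinueFlag bits, and CreateProcessFlag_All ORs together every defined creation flag. A minimal sketch of the mask-style validation these are presumably intended for follows; the helper name and the check itself are illustrative and not taken from this diff.

/* Illustrative only: assumes bits outside an *_All / *_AllMask value should be rejected. */
#include <cstdint>

constexpr bool HasOnlyKnownBits(std::uint32_t value, std::uint32_t all_mask) {
    return (value & ~all_mask) == 0;
}

/* ContinueFlag_ExceptionHandled | ContinueFlag_EnableExceptionEvent lies within the mask... */
static_assert(HasOnlyKnownBits(0x3, (1u << 4) - 1), "within ContinueFlag_AllMask");
/* ...while an undefined bit is rejected. */
static_assert(!HasOnlyKnownBits(1u << 5, (1u << 4) - 1), "outside ContinueFlag_AllMask");

Both asserts hold at compile time under that assumption.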
static_assert(sizeof(CreateProcessParameter) == 0x30); diff --git a/libraries/libvapours/include/vapours/timespan.hpp b/libraries/libvapours/include/vapours/timespan.hpp index 201b0c211..0cca06a84 100644 --- a/libraries/libvapours/include/vapours/timespan.hpp +++ b/libraries/libvapours/include/vapours/timespan.hpp @@ -31,7 +31,7 @@ namespace ams { static constexpr ALWAYS_INLINE TimeSpanType FromSeconds(s64 s) { return FromMilliSeconds(s * INT64_C(1000)); } static constexpr ALWAYS_INLINE TimeSpanType FromMinutes(s64 m) { return FromSeconds(m * INT64_C(60)); } static constexpr ALWAYS_INLINE TimeSpanType FromHours(s64 h) { return FromMinutes(h * INT64_C(60)); } - static constexpr ALWAYS_INLINE TimeSpanType FromDays(s64 d) { return FromMinutes(d * INT64_C(24)); } + static constexpr ALWAYS_INLINE TimeSpanType FromDays(s64 d) { return FromHours(d * INT64_C(24)); } constexpr ALWAYS_INLINE s64 GetNanoSeconds() const { return this->ns; } constexpr ALWAYS_INLINE s64 GetMicroSeconds() const { return this->GetNanoSeconds() / (INT64_C(1000)); } diff --git a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp index 4244f93d3..d7910f5b8 100644 --- a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp +++ b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp @@ -279,6 +279,40 @@ namespace ams::util { static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage)); }; + template> + class IntrusiveRedBlackTreeMemberTraitsDeferredAssert; + + template + class IntrusiveRedBlackTreeMemberTraitsDeferredAssert { + public: + template + using TreeType = IntrusiveRedBlackTree; + + static constexpr bool IsValid() { + TYPED_STORAGE(Derived) DerivedStorage = {}; + return GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage); + } + private: + template + friend class IntrusiveRedBlackTree; + + static constexpr IntrusiveRedBlackTreeNode *GetNode(Derived *parent) { + return std::addressof(parent->*Member); + } + + static constexpr IntrusiveRedBlackTreeNode const *GetNode(Derived const *parent) { + return std::addressof(parent->*Member); + } + + static constexpr Derived *GetParent(IntrusiveRedBlackTreeNode *node) { + return util::GetParentPointer(node); + } + + static constexpr Derived const *GetParent(IntrusiveRedBlackTreeNode const *node) { + return util::GetParentPointer(node); + } + }; + template class IntrusiveRedBlackTreeBaseNode : public IntrusiveRedBlackTreeNode{}; diff --git a/libraries/libvapours/include/vapours/util/util_string_util.hpp b/libraries/libvapours/include/vapours/util/util_string_util.hpp index a15367690..d06d96a7d 100644 --- a/libraries/libvapours/include/vapours/util/util_string_util.hpp +++ b/libraries/libvapours/include/vapours/util/util_string_util.hpp @@ -20,6 +20,54 @@ namespace ams::util { + template + constexpr T ToLower(T c) { + return ('A' <= c && c <= 'Z') ? (c - 'A' + 'a') : c; + } + + template + constexpr T ToUpper(T c) { + return ('a' <= c && c <= 'z') ? 
(c - 'a' + 'A') : c; + } + + template + int Strncmp(const T *lhs, const T *rhs, int count) { + AMS_ASSERT(lhs != nullptr); + AMS_ASSERT(rhs != nullptr); + AMS_ABORT_UNLESS(count >= 0); + + if (count == 0) { + return 0; + } + + T l, r; + do { + l = *(lhs++); + r = *(rhs++); + } while (l && (l == r) && (--count)); + + return l - r; + } + + template + int Strnicmp(const T *lhs, const T *rhs, int count) { + AMS_ASSERT(lhs != nullptr); + AMS_ASSERT(rhs != nullptr); + AMS_ABORT_UNLESS(count >= 0); + + if (count == 0) { + return 0; + } + + T l, r; + do { + l = ::ams::util::ToLower(*(lhs++)); + r = ::ams::util::ToLower(*(rhs++)); + } while (l && (l == r) && (--count)); + + return l - r; + } + template constexpr int Strlcpy(T *dst, const T *src, int count) { AMS_ASSERT(dst != nullptr); diff --git a/mesosphere/build_mesosphere.py b/mesosphere/build_mesosphere.py index 9bc8fe511..bbd06b62c 100644 --- a/mesosphere/build_mesosphere.py +++ b/mesosphere/build_mesosphere.py @@ -2,19 +2,8 @@ import sys, os from struct import pack as pk, unpack as up -ATMOSPHERE_TARGET_FIRMWARE_100 = 1 -ATMOSPHERE_TARGET_FIRMWARE_200 = 2 -ATMOSPHERE_TARGET_FIRMWARE_300 = 3 -ATMOSPHERE_TARGET_FIRMWARE_400 = 4 -ATMOSPHERE_TARGET_FIRMWARE_500 = 5 -ATMOSPHERE_TARGET_FIRMWARE_600 = 6 -ATMOSPHERE_TARGET_FIRMWARE_620 = 7 -ATMOSPHERE_TARGET_FIRMWARE_700 = 8 -ATMOSPHERE_TARGET_FIRMWARE_800 = 9 -ATMOSPHERE_TARGET_FIRMWARE_810 = 10 -ATMOSPHERE_TARGET_FIRMWARE_900 = 11 -ATMOSPHERE_TARGET_FIRMWARE_910 = 12 -ATMOSPHERE_TARGET_FIRMWARE_1000 = 13 +def atmosphere_target_firmware(major, minor, micro, rev = 0): + return (major << 24) | (minor << 16) | (micro << 8) | rev def align_up(val, algn): val += algn - 1 @@ -50,7 +39,7 @@ def main(argc, argv): with open('mesosphere.bin', 'wb') as f: f.write(kernel[:kernel_metadata_offset + 4]) - f.write(pk('. + */ + +/* ams::kern::svc::PatchSvcTableEntry(void (* const*)(), unsigned int, void (*)()) */ +.section .text._ZN3ams4kern3svc18PatchSvcTableEntryEPKPFvvEjS3_, "ax", %progbits +.global _ZN3ams4kern3svc18PatchSvcTableEntryEPKPFvvEjS3_ +.type _ZN3ams4kern3svc18PatchSvcTableEntryEPKPFvvEjS3_, %function +_ZN3ams4kern3svc18PatchSvcTableEntryEPKPFvvEjS3_: + /* This function violates const correctness by design, to patch the svc tables. */ + /* The svc tables live in .rodata (.rel.ro), but must be patched by initial constructors */ + /* to support firmware-specific table entries. 
*/ + mov w1, w1 + str x2, [x0, x1, lsl #3] + ret \ No newline at end of file diff --git a/libraries/libmesosphere/source/libc/arch/arm64/asmdefs.h b/mesosphere/kernel/source/libc/arch/arm64/asmdefs.h similarity index 100% rename from libraries/libmesosphere/source/libc/arch/arm64/asmdefs.h rename to mesosphere/kernel/source/libc/arch/arm64/asmdefs.h diff --git a/libraries/libmesosphere/source/libc/arch/arm64/memcmp.arch.arm64-broken.s b/mesosphere/kernel/source/libc/arch/arm64/memcmp.arch.arm64.s similarity index 100% rename from libraries/libmesosphere/source/libc/arch/arm64/memcmp.arch.arm64-broken.s rename to mesosphere/kernel/source/libc/arch/arm64/memcmp.arch.arm64.s diff --git a/libraries/libmesosphere/source/libc/arch/arm64/memcpy.arch.arm64-broken.s b/mesosphere/kernel/source/libc/arch/arm64/memcpy.arch.arm64.s similarity index 100% rename from libraries/libmesosphere/source/libc/arch/arm64/memcpy.arch.arm64-broken.s rename to mesosphere/kernel/source/libc/arch/arm64/memcpy.arch.arm64.s diff --git a/libraries/libmesosphere/source/libc/arch/arm64/memset.arch.arm64-broken.s b/mesosphere/kernel/source/libc/arch/arm64/memset.arch.arm64.s similarity index 99% rename from libraries/libmesosphere/source/libc/arch/arm64/memset.arch.arm64-broken.s rename to mesosphere/kernel/source/libc/arch/arm64/memset.arch.arm64.s index d8d272726..700f0e847 100644 --- a/libraries/libmesosphere/source/libc/arch/arm64/memset.arch.arm64-broken.s +++ b/mesosphere/kernel/source/libc/arch/arm64/memset.arch.arm64.s @@ -76,11 +76,13 @@ L(set96): .p2align 4 L(set_long): stp val, val, [dstin] - bic dst, dstin, 15 #if DC_ZVA_THRESHOLD cmp count, DC_ZVA_THRESHOLD ccmp val, 0, 0, cs + bic dst, dstin, 15 b.eq L(zva_64) +#else + bic dst, dstin, 15 #endif /* Small-size or non-zero memset does not use DC ZVA. */ sub count, dstend, dst diff --git a/mesosphere/kernel/source/libc/kern_libc_config.arch.arm64.h b/mesosphere/kernel/source/libc/kern_libc_config.arch.arm64.h new file mode 100644 index 000000000..5d01bc77b --- /dev/null +++ b/mesosphere/kernel/source/libc/kern_libc_config.arch.arm64.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +/* Definitions for libc genericity. */ +#define MESOSPHERE_LIBC_MEMCPY_GENERIC 0 +#define MESOSPHERE_LIBC_MEMCMP_GENERIC 0 +#define MESOSPHERE_LIBC_MEMMOVE_GENERIC 0 +#define MESOSPHERE_LIBC_MEMSET_GENERIC 0 +#define MESOSPHERE_LIBC_STRNCPY_GENERIC 1 +#define MESOSPHERE_LIBC_STRNCMP_GENERIC 1 diff --git a/fusee/fusee-secondary/src/emu_dev.h b/mesosphere/kernel/source/libc/kern_libc_config.h similarity index 56% rename from fusee/fusee-secondary/src/emu_dev.h rename to mesosphere/kernel/source/libc/kern_libc_config.h index 435903eaa..f79fb7501 100644 --- a/fusee/fusee-secondary/src/emu_dev.h +++ b/mesosphere/kernel/source/libc/kern_libc_config.h @@ -13,22 +13,14 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ - -#ifndef FUSEE_EMU_DEV_H -#define FUSEE_EMU_DEV_H +#pragma once -#include -#include -#include -#include "device_partition.h" +#if defined(ATMOSPHERE_ARCH_ARM64) -#define EMUDEV_MAX_DEVICES 16 + #include "kern_libc_config.arch.arm64.h" -int emudev_mount_device(const char *name, const device_partition_t *devpart, const char *origin_path, int num_parts, uint64_t part_limit); -int emudev_register_device(const char *name); -int emudev_unregister_device(const char *name); -int emudev_unmount_device(const char *name); /* also unregisters. */ +#else -int emudev_unmount_all(void); + #error "Unknown architecture for libc" #endif diff --git a/libraries/libmesosphere/source/libc/kern_libc_generic.c b/mesosphere/kernel/source/libc/kern_libc_generic.c similarity index 96% rename from libraries/libmesosphere/source/libc/kern_libc_generic.c rename to mesosphere/kernel/source/libc/kern_libc_generic.c index fb7e4cc79..d41caa551 100644 --- a/libraries/libmesosphere/source/libc/kern_libc_generic.c +++ b/mesosphere/kernel/source/libc/kern_libc_generic.c @@ -16,6 +16,7 @@ #include #include #include +#include "kern_libc_config.h" /* Note: copied from newlib */ #ifdef __cplusplus @@ -58,6 +59,8 @@ QUICKREF #undef TOO_SMALL #define TOO_SMALL(LEN) ((LEN) < BIGBLOCKSIZE) +#if MESOSPHERE_LIBC_MEMMOVE_GENERIC + /*SUPPRESS 20*/ void * //__inhibit_loop_to_libcall @@ -147,6 +150,8 @@ memmove (void *dst_void, #endif /* not PREFER_SIZE_OVER_SPEED */ } +#endif /* MESOSPHERE_LIBC_MEMMOVE_GENERIC */ + /* FUNCTION <>---copy memory regions @@ -169,6 +174,8 @@ QUICKREF memcpy ansi pure */ +#if MESOSPHERE_LIBC_MEMCPY_GENERIC + void * __attribute__((weak)) memcpy (void * dst0, @@ -229,6 +236,8 @@ memcpy (void * dst0, #endif /* not PREFER_SIZE_OVER_SPEED */ } +#endif /* MESOSPHERE_LIBC_MEMCPY_GENERIC */ + /* FUNCTION <>---set an area of memory @@ -260,6 +269,8 @@ QUICKREF #define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1)) #define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE) +#if MESOSPHERE_LIBC_MEMSET_GENERIC + void * __attribute__((weak)) memset (void *m, @@ -322,6 +333,8 @@ memset (void *m, return m; } +#endif /* MESOSPHERE_LIBC_MEMSET_GENERIC */ + /* FUNCTION <>---compare two memory areas @@ -359,6 +372,8 @@ QUICKREF /* Threshhold for punting to the byte copier. 
*/ #define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE) +#if MESOSPHERE_LIBC_MEMCMP_GENERIC + int __attribute__((weak)) memcmp (const void *m1, @@ -421,6 +436,8 @@ memcmp (const void *m1, #endif /* not PREFER_SIZE_OVER_SPEED */ } +#endif /* MESOSPHERE_LIBC_MEMCMP_GENERIC */ + /* FUNCTION <>---counted copy string @@ -475,6 +492,8 @@ QUICKREF #undef TOO_SMALL #define TOO_SMALL(LEN) ((LEN) < sizeof (long)) +#if MESOSPHERE_LIBC_STRNCMP_GENERIC + char * strncpy (char *__restrict dst0, const char *__restrict src0, @@ -534,6 +553,8 @@ strncpy (char *__restrict dst0, #endif /* not PREFER_SIZE_OVER_SPEED */ } +#endif /* MESOSPHERE_LIBC_STRNCPY_GENERIC */ + /* FUNCTION <>---character string compare @@ -581,6 +602,8 @@ QUICKREF #error long int is not a 32bit or 64bit byte #endif +#if MESOSPHERE_LIBC_STRNCMP_GENERIC + int strncmp (const char *s1, const char *s2, @@ -643,6 +666,8 @@ strncmp (const char *s1, #endif /* not PREFER_SIZE_OVER_SPEED */ } +#endif /* MESOSPHERE_LIBC_STRNCMP_GENERIC */ + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/mesosphere/kernel_ldr/source/arch/arm64/start.s b/mesosphere/kernel_ldr/source/arch/arm64/start.s index 3a27f40c3..29efffa5c 100644 --- a/mesosphere/kernel_ldr/source/arch/arm64/start.s +++ b/mesosphere/kernel_ldr/source/arch/arm64/start.s @@ -18,6 +18,10 @@ #define cpuactlr_el1 s3_1_c15_c2_0 #define cpuectlr_el1 s3_1_c15_c2_1 +#define LOAD_IMMEDIATE_32(reg, val) \ + mov reg, #(((val) >> 0x00) & 0xFFFF); \ + movk reg, #(((val) >> 0x10) & 0xFFFF), lsl#16 + .section .crt0.text.start, "ax", %progbits .global _start _start: @@ -46,12 +50,25 @@ _main: ldr x17, [x17, #0x10] /* stack top */ add sp, x17, x18 - /* Stack is now set up. */ - /* Apply relocations and call init array for KernelLdr. */ + /* Stack is now set up, so save important state. */ sub sp, sp, #0x30 stp x0, x1, [sp, #0x00] stp x2, x30, [sp, #0x10] stp xzr, xzr, [sp, #0x20] + + /* Get the target firmware from exosphere. */ + LOAD_IMMEDIATE_32(w0, 0xC3000004) + mov w1, #65000 + smc #1 + cmp x0, #0 +0: + b.ne 0b + + /* Store the target firmware. */ + adr x0, __metadata_target_firmware + str w1, [x0] + + /* Apply relocations and call init array for KernelLdr. */ adr x0, _start adr x1, __external_references ldr x1, [x1, #0x18] /* .dynamic. */ diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp index cb36fab23..cafe8a0b7 100644 --- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp +++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp @@ -55,7 +55,6 @@ namespace ams::kern::init::loader { } void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) { - /* TODO: Proper secure monitor call. */ KPhysicalAddress correct_base = KSystemControl::Init::GetKernelPhysicalBaseAddress(base_address); if (correct_base != base_address) { const uintptr_t diff = GetInteger(correct_base) - base_address; @@ -241,9 +240,6 @@ namespace ams::kern::init::loader { RelocateKernelPhysically(base_address, layout); /* Validate kernel layout. */ - /* TODO: constexpr 0x1000 definition somewhere. */ - /* In stratosphere, this is os::MemoryPageSize. */ - /* We don't have ams::os, this may go in hw:: or something. 
*/ const uintptr_t rx_offset = layout->rx_offset; const uintptr_t rx_end_offset = layout->rx_end_offset; const uintptr_t ro_offset = layout->ro_offset; @@ -251,12 +247,12 @@ namespace ams::kern::init::loader { const uintptr_t rw_offset = layout->rw_offset; /* UNUSED: const uintptr_t rw_end_offset = layout->rw_end_offset; */ const uintptr_t bss_end_offset = layout->bss_end_offset; - MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_offset, 0x1000)); - MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_end_offset, 0x1000)); - MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_offset, 0x1000)); - MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_end_offset, 0x1000)); - MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rw_offset, 0x1000)); - MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(bss_end_offset, 0x1000)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_offset, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rx_end_offset, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_offset, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(ro_end_offset, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(rw_offset, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(bss_end_offset, PageSize)); const uintptr_t bss_offset = layout->bss_offset; const uintptr_t ini_load_offset = layout->ini_load_offset; const uintptr_t dynamic_offset = layout->dynamic_offset; @@ -317,12 +313,14 @@ namespace ams::kern::init::loader { const Elf::Dyn *kernel_dynamic = reinterpret_cast(GetInteger(virtual_base_address) + dynamic_offset); Elf::ApplyRelocations(GetInteger(virtual_base_address), kernel_dynamic); + /* Call the kernel's init array functions. */ + /* NOTE: The kernel does this after reprotecting .rodata, but we do it before. */ + /* This allows our global constructors to edit .rodata, which is valuable for editing the SVC tables to support older firmwares' ABIs. */ + Elf::CallInitArrayFuncs(GetInteger(virtual_base_address) + init_array_offset, GetInteger(virtual_base_address) + init_array_end_offset); + /* Reprotect .rodata as R-- */ ttbr1_table.Reprotect(virtual_base_address + ro_offset, ro_end_offset - ro_offset, KernelRwDataAttribute, KernelRoDataAttribute); - /* Call the kernel's init array functions. */ - Elf::CallInitArrayFuncs(GetInteger(virtual_base_address) + init_array_offset, GetInteger(virtual_base_address) + init_array_end_offset); - /* Return the difference between the random virtual base and the physical base. */ return GetInteger(virtual_base_address) - base_address; } diff --git a/mesosphere/kernel_ldr/source/libc/kern_libc_config.arch.arm64.h b/mesosphere/kernel_ldr/source/libc/kern_libc_config.arch.arm64.h new file mode 100644 index 000000000..df67cea27 --- /dev/null +++ b/mesosphere/kernel_ldr/source/libc/kern_libc_config.arch.arm64.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +/* Definitions for libc genericity. 
*/ +#define MESOSPHERE_LIBC_MEMCPY_GENERIC 1 +#define MESOSPHERE_LIBC_MEMCMP_GENERIC 1 +#define MESOSPHERE_LIBC_MEMMOVE_GENERIC 1 +#define MESOSPHERE_LIBC_MEMSET_GENERIC 1 +#define MESOSPHERE_LIBC_STRNCPY_GENERIC 1 +#define MESOSPHERE_LIBC_STRNCMP_GENERIC 1 diff --git a/mesosphere/kernel_ldr/source/libc/kern_libc_config.h b/mesosphere/kernel_ldr/source/libc/kern_libc_config.h new file mode 100644 index 000000000..f79fb7501 --- /dev/null +++ b/mesosphere/kernel_ldr/source/libc/kern_libc_config.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include "kern_libc_config.arch.arm64.h" + +#else + + #error "Unknown architecture for libc" + +#endif diff --git a/mesosphere/kernel_ldr/source/libc/kern_libc_generic.c b/mesosphere/kernel_ldr/source/libc/kern_libc_generic.c new file mode 100644 index 000000000..d41caa551 --- /dev/null +++ b/mesosphere/kernel_ldr/source/libc/kern_libc_generic.c @@ -0,0 +1,673 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include "kern_libc_config.h" + +/* Note: copied from newlib */ +#ifdef __cplusplus +extern "C" { +#endif + +/* +FUNCTION + <>---move possibly overlapping memory +INDEX + memmove +SYNOPSIS + #include + void *memmove(void *<[dst]>, const void *<[src]>, size_t <[length]>); +DESCRIPTION + This function moves <[length]> characters from the block of + memory starting at <<*<[src]>>> to the memory starting at + <<*<[dst]>>>. <> reproduces the characters correctly + at <<*<[dst]>>> even if the two areas overlap. +RETURNS + The function returns <[dst]> as passed. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + memmove ansi pure +*/ + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +/* How many bytes are copied each iteration of the 4X unrolled loop. */ +#define BIGBLOCKSIZE (sizeof (long) << 2) + +/* How many bytes are copied each iteration of the word copy loop. */ +#define LITTLEBLOCKSIZE (sizeof (long)) + +/* Threshhold for punting to the byte copier. 
*/ +#undef TOO_SMALL +#define TOO_SMALL(LEN) ((LEN) < BIGBLOCKSIZE) + +#if MESOSPHERE_LIBC_MEMMOVE_GENERIC + +/*SUPPRESS 20*/ +void * +//__inhibit_loop_to_libcall +__attribute__((weak)) +memmove (void *dst_void, + const void *src_void, + size_t length) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + char *dst = dst_void; + const char *src = src_void; + + if (src < dst && dst < src + length) + { + /* Have to copy backwards */ + src += length; + dst += length; + while (length--) + { + *--dst = *--src; + } + } + else + { + while (length--) + { + *dst++ = *src++; + } + } + + return dst_void; +#else + char *dst = dst_void; + const char *src = src_void; + long *aligned_dst; + const long *aligned_src; + + if (src < dst && dst < src + length) + { + /* Destructive overlap...have to copy backwards */ + src += length; + dst += length; + while (length--) + { + *--dst = *--src; + } + } + else + { + /* Use optimizing algorithm for a non-destructive copy to closely + match memcpy. If the size is small or either SRC or DST is unaligned, + then punt into the byte copy loop. This should be rare. */ + if (!TOO_SMALL(length) && !UNALIGNED (src, dst)) + { + aligned_dst = (long*)dst; + aligned_src = (long*)src; + + /* Copy 4X long words at a time if possible. */ + while (length >= BIGBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + length -= BIGBLOCKSIZE; + } + + /* Copy one long word at a time if possible. */ + while (length >= LITTLEBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + length -= LITTLEBLOCKSIZE; + } + + /* Pick up any residual with a byte copier. */ + dst = (char*)aligned_dst; + src = (char*)aligned_src; + } + + while (length--) + { + *dst++ = *src++; + } + } + + return dst_void; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +#endif /* MESOSPHERE_LIBC_MEMMOVE_GENERIC */ + +/* +FUNCTION + <>---copy memory regions +SYNOPSIS + #include + void* memcpy(void *restrict <[out]>, const void *restrict <[in]>, + size_t <[n]>); +DESCRIPTION + This function copies <[n]> bytes from the memory region + pointed to by <[in]> to the memory region pointed to by + <[out]>. + If the regions overlap, the behavior is undefined. +RETURNS + <> returns a pointer to the first byte of the <[out]> + region. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + memcpy ansi pure + */ + +#if MESOSPHERE_LIBC_MEMCPY_GENERIC + +void * +__attribute__((weak)) +memcpy (void * dst0, + const void * __restrict src0, + size_t len0) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + char *dst = (char *) dst0; + char *src = (char *) src0; + + void *save = dst0; + + while (len0--) + { + *dst++ = *src++; + } + + return save; +#else + char *dst = dst0; + const char *src = src0; + long *aligned_dst; + const long *aligned_src; + + /* If the size is small, or either SRC or DST is unaligned, + then punt into the byte copy loop. This should be rare. */ + if (!TOO_SMALL(len0) && !UNALIGNED (src, dst)) + { + aligned_dst = (long*)dst; + aligned_src = (long*)src; + + /* Copy 4X long words at a time if possible. */ + while (len0 >= BIGBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + *aligned_dst++ = *aligned_src++; + len0 -= BIGBLOCKSIZE; + } + + /* Copy one long word at a time if possible. 
*/ + while (len0 >= LITTLEBLOCKSIZE) + { + *aligned_dst++ = *aligned_src++; + len0 -= LITTLEBLOCKSIZE; + } + + /* Pick up any residual with a byte copier. */ + dst = (char*)aligned_dst; + src = (char*)aligned_src; + } + + while (len0--) + *dst++ = *src++; + + return dst0; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +#endif /* MESOSPHERE_LIBC_MEMCPY_GENERIC */ + +/* +FUNCTION + <>---set an area of memory +INDEX + memset +SYNOPSIS + #include + void *memset(void *<[dst]>, int <[c]>, size_t <[length]>); +DESCRIPTION + This function converts the argument <[c]> into an unsigned + char and fills the first <[length]> characters of the array + pointed to by <[dst]> to the value. +RETURNS + <> returns the value of <[dst]>. +PORTABILITY +<> is ANSI C. + <> requires no supporting OS subroutines. +QUICKREF + memset ansi pure +*/ + +#include + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +#define LBLOCKSIZE (sizeof(long)) +#define UNALIGNED(X) ((long)X & (LBLOCKSIZE - 1)) +#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE) + +#if MESOSPHERE_LIBC_MEMSET_GENERIC + +void * +__attribute__((weak)) +memset (void *m, + int c, + size_t n) +{ + char *s = (char *) m; + +#if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__) + unsigned int i; + unsigned long buffer; + unsigned long *aligned_addr; + unsigned int d = c & 0xff; /* To avoid sign extension, copy C to an + unsigned variable. */ + + while (UNALIGNED (s)) + { + if (n--) + *s++ = (char) c; + else + return m; + } + + if (!TOO_SMALL (n)) + { + /* If we get this far, we know that n is large and s is word-aligned. */ + aligned_addr = (unsigned long *) s; + + /* Store D into each char sized location in BUFFER so that + we can set large blocks quickly. */ + buffer = (d << 8) | d; + buffer |= (buffer << 16); + for (i = 32; i < LBLOCKSIZE * 8; i <<= 1) + buffer = (buffer << i) | buffer; + + /* Unroll the loop. */ + while (n >= LBLOCKSIZE*4) + { + *aligned_addr++ = buffer; + *aligned_addr++ = buffer; + *aligned_addr++ = buffer; + *aligned_addr++ = buffer; + n -= 4*LBLOCKSIZE; + } + + while (n >= LBLOCKSIZE) + { + *aligned_addr++ = buffer; + n -= LBLOCKSIZE; + } + /* Pick up the remainder with a bytewise loop. */ + s = (char*)aligned_addr; + } + +#endif /* not PREFER_SIZE_OVER_SPEED */ + + while (n--) + *s++ = (char) c; + + return m; +} + +#endif /* MESOSPHERE_LIBC_MEMSET_GENERIC */ + +/* +FUNCTION + <>---compare two memory areas +INDEX + memcmp +SYNOPSIS + #include + int memcmp(const void *<[s1]>, const void *<[s2]>, size_t <[n]>); +DESCRIPTION + This function compares not more than <[n]> characters of the + object pointed to by <[s1]> with the object pointed to by <[s2]>. +RETURNS + The function returns an integer greater than, equal to or + less than zero according to whether the object pointed to by + <[s1]> is greater than, equal to or less than the object + pointed to by <[s2]>. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + memcmp ansi pure +*/ + +#undef LBLOCKSIZE +#undef UNALIGNED +#undef TOO_SMALL + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +/* How many bytes are copied each iteration of the word copy loop. */ +#define LBLOCKSIZE (sizeof (long)) + +/* Threshhold for punting to the byte copier. 
*/ +#define TOO_SMALL(LEN) ((LEN) < LBLOCKSIZE) + +#if MESOSPHERE_LIBC_MEMCMP_GENERIC + +int +__attribute__((weak)) +memcmp (const void *m1, + const void *m2, + size_t n) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + unsigned char *s1 = (unsigned char *) m1; + unsigned char *s2 = (unsigned char *) m2; + + while (n--) + { + if (*s1 != *s2) + { + return *s1 - *s2; + } + s1++; + s2++; + } + return 0; +#else + unsigned char *s1 = (unsigned char *) m1; + unsigned char *s2 = (unsigned char *) m2; + unsigned long *a1; + unsigned long *a2; + + /* If the size is too small, or either pointer is unaligned, + then we punt to the byte compare loop. Hopefully this will + not turn up in inner loops. */ + if (!TOO_SMALL(n) && !UNALIGNED(s1,s2)) + { + /* Otherwise, load and compare the blocks of memory one + word at a time. */ + a1 = (unsigned long*) s1; + a2 = (unsigned long*) s2; + while (n >= LBLOCKSIZE) + { + if (*a1 != *a2) + break; + a1++; + a2++; + n -= LBLOCKSIZE; + } + + /* check m mod LBLOCKSIZE remaining characters */ + + s1 = (unsigned char*)a1; + s2 = (unsigned char*)a2; + } + + while (n--) + { + if (*s1 != *s2) + return *s1 - *s2; + s1++; + s2++; + } + + return 0; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +#endif /* MESOSPHERE_LIBC_MEMCMP_GENERIC */ + +/* +FUNCTION + <>---counted copy string +INDEX + strncpy +SYNOPSIS + #include + char *strncpy(char *restrict <[dst]>, const char *restrict <[src]>, + size_t <[length]>); +DESCRIPTION + <> copies not more than <[length]> characters from the + the string pointed to by <[src]> (including the terminating + null character) to the array pointed to by <[dst]>. If the + string pointed to by <[src]> is shorter than <[length]> + characters, null characters are appended to the destination + array until a total of <[length]> characters have been + written. +RETURNS + This function returns the initial value of <[dst]>. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + strncpy ansi pure +*/ + +#include +#include + +/*SUPPRESS 560*/ +/*SUPPRESS 530*/ + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +#if LONG_MAX == 2147483647L +#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) +#else +#if LONG_MAX == 9223372036854775807L +/* Nonzero if X (a long int) contains a NULL byte. */ +#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080) +#else +#error long int is not a 32bit or 64bit type. +#endif +#endif + +#ifndef DETECTNULL +#error long int is not a 32bit or 64bit byte +#endif + +#undef TOO_SMALL +#define TOO_SMALL(LEN) ((LEN) < sizeof (long)) + +#if MESOSPHERE_LIBC_STRNCMP_GENERIC + +char * +strncpy (char *__restrict dst0, + const char *__restrict src0, + size_t count) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + char *dscan; + const char *sscan; + + dscan = dst0; + sscan = src0; + while (count > 0) + { + --count; + if ((*dscan++ = *sscan++) == '\0') + break; + } + while (count-- > 0) + *dscan++ = '\0'; + + return dst0; +#else + char *dst = dst0; + const char *src = src0; + long *aligned_dst; + const long *aligned_src; + + /* If SRC and DEST is aligned and count large enough, then copy words. */ + if (!UNALIGNED (src, dst) && !TOO_SMALL (count)) + { + aligned_dst = (long*)dst; + aligned_src = (long*)src; + + /* SRC and DEST are both "long int" aligned, try to do "long int" + sized copies. 
*/ + while (count >= sizeof (long int) && !DETECTNULL(*aligned_src)) + { + count -= sizeof (long int); + *aligned_dst++ = *aligned_src++; + } + + dst = (char*)aligned_dst; + src = (char*)aligned_src; + } + + while (count > 0) + { + --count; + if ((*dst++ = *src++) == '\0') + break; + } + + while (count-- > 0) + *dst++ = '\0'; + + return dst0; +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +#endif /* MESOSPHERE_LIBC_STRNCPY_GENERIC */ + +/* +FUNCTION + <>---character string compare + +INDEX + strncmp +SYNOPSIS + #include + int strncmp(const char *<[a]>, const char * <[b]>, size_t <[length]>); +DESCRIPTION + <> compares up to <[length]> characters + from the string at <[a]> to the string at <[b]>. +RETURNS + If <<*<[a]>>> sorts lexicographically after <<*<[b]>>>, + <> returns a number greater than zero. If the two + strings are equivalent, <> returns zero. If <<*<[a]>>> + sorts lexicographically before <<*<[b]>>>, <> returns a + number less than zero. +PORTABILITY +<> is ANSI C. +<> requires no supporting OS subroutines. +QUICKREF + strncmp ansi pure +*/ + +#include +#include + +/* Nonzero if either X or Y is not aligned on a "long" boundary. */ +#define UNALIGNED(X, Y) \ + (((long)X & (sizeof (long) - 1)) | ((long)Y & (sizeof (long) - 1))) + +/* DETECTNULL returns nonzero if (long)X contains a NULL byte. */ +#if LONG_MAX == 2147483647L +#define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) +#else +#if LONG_MAX == 9223372036854775807L +#define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080) +#else +#error long int is not a 32bit or 64bit type. +#endif +#endif + +#ifndef DETECTNULL +#error long int is not a 32bit or 64bit byte +#endif + +#if MESOSPHERE_LIBC_STRNCMP_GENERIC + +int +strncmp (const char *s1, + const char *s2, + size_t n) +{ +#if defined(PREFER_SIZE_OVER_SPEED) || defined(__OPTIMIZE_SIZE__) + if (n == 0) + return 0; + + while (n-- != 0 && *s1 == *s2) + { + if (n == 0 || *s1 == '\0') + break; + s1++; + s2++; + } + + return (*(unsigned char *) s1) - (*(unsigned char *) s2); +#else + unsigned long *a1; + unsigned long *a2; + + if (n == 0) + return 0; + + /* If s1 or s2 are unaligned, then compare bytes. */ + if (!UNALIGNED (s1, s2)) + { + /* If s1 and s2 are word-aligned, compare them a word at a time. */ + a1 = (unsigned long*)s1; + a2 = (unsigned long*)s2; + while (n >= sizeof (long) && *a1 == *a2) + { + n -= sizeof (long); + + /* If we've run out of bytes or hit a null, return zero + since we already know *a1 == *a2. */ + if (n == 0 || DETECTNULL (*a1)) + return 0; + + a1++; + a2++; + } + + /* A difference was detected in last few bytes of s1, so search bytewise */ + s1 = (char*)a1; + s2 = (char*)a2; + } + + while (n-- > 0 && *s1 == *s2) + { + /* If we've run out of bytes or hit a null, return zero + since we already know *s1 == *s2. 
*/ + if (n == 0 || *s1 == '\0') + return 0; + s1++; + s2++; + } + return (*(unsigned char *) s1) - (*(unsigned char *) s2); +#endif /* not PREFER_SIZE_OVER_SPEED */ +} + +#endif /* MESOSPHERE_LIBC_STRNCMP_GENERIC */ + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/sept/sept-secondary/src/exception_handlers.c b/sept/sept-secondary/src/exception_handlers.c index 821f8cb2d..128425891 100644 --- a/sept/sept-secondary/src/exception_handlers.c +++ b/sept/sept-secondary/src/exception_handlers.c @@ -19,9 +19,10 @@ #include "exception_handlers.h" #include "utils.h" #include "lib/log.h" +#include "lib/vsprintf.h" #define CODE_DUMP_SIZE 0x30 -#define STACK_DUMP_SIZE 0x60 +#define STACK_DUMP_SIZE 0x30 extern const uint32_t exception_handler_table[]; @@ -34,6 +35,40 @@ static const char *register_names[] = { "SP", "LR", "PC", "CPSR", }; +/* Adapted from https://gist.github.com/ccbrown/9722406 */ +static void hexdump(const void* data, size_t size, uintptr_t addrbase, char* strbuf) { + const uint8_t *d = (const uint8_t *)data; + char ascii[17] = {0}; + ascii[16] = '\0'; + + for (size_t i = 0; i < size; i++) { + if (i % 16 == 0) { + strbuf += sprintf(strbuf, "%0*" PRIXPTR ": | ", 2 * sizeof(addrbase), addrbase + i); + } + strbuf += sprintf(strbuf, "%02X ", d[i]); + if (d[i] >= ' ' && d[i] <= '~') { + ascii[i % 16] = d[i]; + } else { + ascii[i % 16] = '.'; + } + if ((i+1) % 8 == 0 || i+1 == size) { + strbuf += sprintf(strbuf, " "); + if ((i+1) % 16 == 0) { + strbuf += sprintf(strbuf, "| %s \n", ascii); + } else if (i+1 == size) { + ascii[(i+1) % 16] = '\0'; + if ((i+1) % 16 <= 8) { + strbuf += sprintf(strbuf, " "); + } + for (size_t j = (i+1) % 16; j < 16; j++) { + strbuf += sprintf(strbuf, " "); + } + strbuf += sprintf(strbuf, "| %s \n", ascii); + } + } + } +} + void setup_exception_handlers(void) { volatile uint32_t *bpmp_exception_handler_table = (volatile uint32_t *)0x6000F200; for (int i = 0; i < 8; i++) { @@ -44,38 +79,40 @@ void setup_exception_handlers(void) { } void exception_handler_main(uint32_t *registers, unsigned int exception_type) { - uint8_t code_dump[CODE_DUMP_SIZE]; - uint8_t stack_dump[STACK_DUMP_SIZE]; - size_t code_dump_size; - size_t stack_dump_size; + char exception_log[0x400] = {0}; + uint8_t code_dump[CODE_DUMP_SIZE] = {0}; + uint8_t stack_dump[STACK_DUMP_SIZE] = {0}; + size_t code_dump_size = 0; + size_t stack_dump_size = 0; uint32_t pc = registers[15]; uint32_t cpsr = registers[16]; - uint32_t instr_addr = pc + ((cpsr & 0x20) ? 2 : 4) - CODE_DUMP_SIZE; - print(SCREEN_LOG_LEVEL_ERROR, "\nSomething went wrong...\n"); - + sprintf(exception_log, "An exception occured!\n"); + code_dump_size = safecpy(code_dump, (const void *)instr_addr, CODE_DUMP_SIZE); stack_dump_size = safecpy(stack_dump, (const void *)registers[13], STACK_DUMP_SIZE); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nException type: %s\n", - exception_names[exception_type]); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nRegisters:\n\n"); + sprintf(exception_log + strlen(exception_log), "\nException type: %s\n", exception_names[exception_type]); + sprintf(exception_log + strlen(exception_log), "\nRegisters:\n"); /* Print r0 to pc. */ for (int i = 0; i < 16; i += 2) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%-7s%08"PRIX32" %-7s%08"PRIX32"\n", + sprintf(exception_log + strlen(exception_log), "%-7s%08"PRIX32" %-7s%08"PRIX32"\n", register_names[i], registers[i], register_names[i+1], registers[i+1]); } /* Print cpsr. 
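Note how the reworked handler above no longer prints line-by-line: the register dump and the new buffer-writing hexdump both append into the single exception_log buffer, either through the return value of sprintf or by writing at buf + strlen(buf), before the finished log is handed to fatal_error below. A stripped-down sketch of that append idiom (buffer name and values are illustrative only):

    #include <cstdio>
    #include <cstring>

    int main() {
        char log[0x100] = {};
        char *cur = log;
        // sprintf returns the number of characters written, so the cursor can be advanced directly.
        cur += std::sprintf(cur, "Exception type: %s\n", "undefined_instruction");
        cur += std::sprintf(cur, "%-7s%08X\n", "PC", 0x40010000u);
        // Equivalent append style also used in the patch: write at the current end of the buffer.
        std::sprintf(log + std::strlen(log), "Code dump follows.\n");
        std::fputs(log, stdout);
        return 0;
    }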
*/ - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%-7s%08"PRIX32"\n", register_names[16], registers[16]); - - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nCode dump:\n"); - hexdump(code_dump, code_dump_size, instr_addr); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\nStack dump:\n"); - hexdump(stack_dump, stack_dump_size, registers[13]); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\n"); - fatal_error("An exception occured!\n"); + sprintf(exception_log + strlen(exception_log), "%-7s%08"PRIX32"\n", register_names[16], registers[16]); + + /* Print code and stack regions. */ + sprintf(exception_log + strlen(exception_log), "\nCode dump:\n"); + hexdump(code_dump, code_dump_size, instr_addr, exception_log + strlen(exception_log)); + sprintf(exception_log + strlen(exception_log), "\nStack dump:\n"); + hexdump(stack_dump, stack_dump_size, registers[13], exception_log + strlen(exception_log)); + sprintf(exception_log + strlen(exception_log), "\n"); + + /* Throw fatal error with the full exception log. */ + fatal_error(exception_log); } diff --git a/sept/sept-secondary/src/lib/log.h b/sept/sept-secondary/src/lib/log.h index 8a28b0835..d1c5777f3 100644 --- a/sept/sept-secondary/src/lib/log.h +++ b/sept/sept-secondary/src/lib/log.h @@ -17,7 +17,7 @@ #ifndef FUSEE_LOG_H #define FUSEE_LOG_H -#define PRINT_MESSAGE_MAX_LENGTH 512 +#define PRINT_MESSAGE_MAX_LENGTH 1024 #include diff --git a/sept/sept-secondary/src/utils.c b/sept/sept-secondary/src/utils.c index 6ccf4cdd9..3a180c247 100644 --- a/sept/sept-secondary/src/utils.c +++ b/sept/sept-secondary/src/utils.c @@ -105,12 +105,18 @@ __attribute__ ((noreturn)) void generic_panic(void) { } __attribute__((noreturn)) void fatal_error(const char *fmt, ...) { + /* Override the global logging level. */ + log_set_log_level(SCREEN_LOG_LEVEL_ERROR); + + /* Display fatal error. */ va_list args; print(SCREEN_LOG_LEVEL_ERROR, "Fatal error: "); va_start(args, fmt); vprint(SCREEN_LOG_LEVEL_ERROR, fmt, args); va_end(args); - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX,"\nPress POWER to reboot\n"); + print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "\n Press POWER to reboot.\n"); + + /* Wait for button and reboot. 
*/ wait_for_button_and_reboot(); } @@ -122,37 +128,3 @@ __attribute__((noinline)) bool overlaps(uint64_t as, uint64_t ae, uint64_t bs, u return true; return false; } - -/* Adapted from https://gist.github.com/ccbrown/9722406 */ -void hexdump(const void* data, size_t size, uintptr_t addrbase) { - const uint8_t *d = (const uint8_t *)data; - char ascii[17]; - ascii[16] = '\0'; - - for (size_t i = 0; i < size; i++) { - if (i % 16 == 0) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%0*" PRIXPTR ": | ", 2 * sizeof(addrbase), addrbase + i); - } - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "%02X ", d[i]); - if (d[i] >= ' ' && d[i] <= '~') { - ascii[i % 16] = d[i]; - } else { - ascii[i % 16] = '.'; - } - if ((i+1) % 8 == 0 || i+1 == size) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, " "); - if ((i+1) % 16 == 0) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "| %s \n", ascii); - } else if (i+1 == size) { - ascii[(i+1) % 16] = '\0'; - if ((i+1) % 16 <= 8) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, " "); - } - for (size_t j = (i+1) % 16; j < 16; j++) { - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, " "); - } - print(SCREEN_LOG_LEVEL_ERROR | SCREEN_LOG_LEVEL_NO_PREFIX, "| %s \n", ascii); - } - } - } -} diff --git a/sept/sept-secondary/src/utils.h b/sept/sept-secondary/src/utils.h index d4f0d8e6b..457df7602 100644 --- a/sept/sept-secondary/src/utils.h +++ b/sept/sept-secondary/src/utils.h @@ -117,8 +117,6 @@ static inline bool check_32bit_address_range_in_program(uintptr_t addr, size_t s overlaps_a(start, end, __start__, __end__); } -void hexdump(const void* data, size_t size, uintptr_t addrbase); - __attribute__((noreturn)) void watchdog_reboot(void); __attribute__((noreturn)) void pmc_reboot(uint32_t scratch0); void prepare_for_reboot_to_self(void); @@ -126,7 +124,6 @@ __attribute__((noreturn)) void reboot_to_self(void); __attribute__((noreturn)) void wait_for_button_and_reboot(void); __attribute__((noreturn)) void generic_panic(void); - __attribute__((noreturn)) void fatal_error(const char *fmt, ...); #endif diff --git a/stratosphere/ams_mitm/ams_mitm.json b/stratosphere/ams_mitm/ams_mitm.json index 431a1f5f0..c78327d05 100644 --- a/stratosphere/ams_mitm/ams_mitm.json +++ b/stratosphere/ams_mitm/ams_mitm.json @@ -62,6 +62,8 @@ "svcReplyAndReceive": "0x43", "svcReplyAndReceiveWithUserBuffer": "0x44", "svcCreateEvent": "0x45", + "svcMapTransferMemory": "0x51", + "svcUnmapTransferMemory": "0x52", "svcCreateInterruptEvent": "0x53", "svcReadWriteRegister": "0x4E", "svcQueryIoMapping": "0x55", diff --git a/stratosphere/ams_mitm/source/amsmitm_initialization.cpp b/stratosphere/ams_mitm/source/amsmitm_initialization.cpp index 96f89c68a..405a43cc1 100644 --- a/stratosphere/ams_mitm/source/amsmitm_initialization.cpp +++ b/stratosphere/ams_mitm/source/amsmitm_initialization.cpp @@ -139,6 +139,9 @@ namespace ams::mitm { /* Open global SD card file system, so that other threads can begin using the SD. */ mitm::fs::OpenGlobalSdCardFileSystem(); + /* Mount the sd card at a convenient mountpoint. */ + ams::fs::MountSdCard(ams::fs::impl::SdCardFileSystemMountName); + /* Initialize the reboot manager (load a payload off the SD). */ /* Discard result, since it doesn't need to succeed. */ mitm::bpc::LoadRebootPayload(); @@ -157,6 +160,7 @@ namespace ams::mitm { /* Connect to set:sys. 
*/ sm::DoWithSession([]() { + R_ABORT_UNLESS(setInitialize()); R_ABORT_UNLESS(setsysInitialize()); }); diff --git a/stratosphere/ams_mitm/source/amsmitm_main.cpp b/stratosphere/ams_mitm/source/amsmitm_main.cpp index 88faca823..7e9b57bc8 100644 --- a/stratosphere/ams_mitm/source/amsmitm_main.cpp +++ b/stratosphere/ams_mitm/source/amsmitm_main.cpp @@ -17,6 +17,7 @@ #include "amsmitm_initialization.hpp" #include "amsmitm_module_management.hpp" #include "bpc_mitm/bpc_ams_power_utils.hpp" +#include "sysupdater/sysupdater_fs_utils.hpp" extern "C" { extern u32 __start__; @@ -81,15 +82,26 @@ void __appInit(void) { R_ABORT_UNLESS(fsInitialize()); R_ABORT_UNLESS(pmdmntInitialize()); R_ABORT_UNLESS(pminfoInitialize()); + ncm::Initialize(); spl::InitializeForFs(); }); + /* Disable auto-abort in fs operations. */ + fs::SetEnabledAutoAbort(false); + + /* Initialize fssystem library. */ + fssystem::InitializeForFileSystemProxy(); + + /* Configure ncm to use fssystem library to mount content from the sd card. */ + ncm::SetMountContentMetaFunction(mitm::sysupdater::MountSdCardContentMeta); + ams::CheckApiVersion(); } void __appExit(void) { /* Cleanup services. */ spl::Finalize(); + ncm::Finalize(); pminfoExit(); pmdmntExit(); fsExit(); diff --git a/stratosphere/ams_mitm/source/amsmitm_module.hpp b/stratosphere/ams_mitm/source/amsmitm_module.hpp index 1c68f3200..1c5796f37 100644 --- a/stratosphere/ams_mitm/source/amsmitm_module.hpp +++ b/stratosphere/ams_mitm/source/amsmitm_module.hpp @@ -18,7 +18,6 @@ namespace ams::mitm { - /* TODO: C++20 Concepts will make this a lot less stupid. */ template concept IsModule = requires(T, void *arg) { { T::ThreadPriority } -> std::convertible_to; diff --git a/stratosphere/ams_mitm/source/amsmitm_module_management.cpp b/stratosphere/ams_mitm/source/amsmitm_module_management.cpp index ffa792fb9..03a7d779d 100644 --- a/stratosphere/ams_mitm/source/amsmitm_module_management.cpp +++ b/stratosphere/ams_mitm/source/amsmitm_module_management.cpp @@ -23,6 +23,7 @@ #include "bpc_mitm/bpc_ams_module.hpp" #include "ns_mitm/nsmitm_module.hpp" #include "hid_mitm/hidmitm_module.hpp" +#include "sysupdater/sysupdater_module.hpp" namespace ams::mitm { @@ -35,6 +36,7 @@ namespace ams::mitm { ModuleId_BpcAms, ModuleId_NsMitm, ModuleId_HidMitm, + ModuleId_Sysupdater, ModuleId_Count, }; @@ -67,6 +69,7 @@ namespace ams::mitm { GetModuleDefinition(), GetModuleDefinition(), GetModuleDefinition(), + GetModuleDefinition(), }; } diff --git a/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_module.cpp b/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_module.cpp index e2733d18f..06a45b876 100644 --- a/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_module.cpp +++ b/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_module.cpp @@ -37,7 +37,7 @@ namespace ams::mitm::bpc_ams { { Handle bpcams_h; R_ABORT_UNLESS(svcManageNamedPort(&bpcams_h, AtmosphereServiceName.name, AtmosphereMaxSessions)); - g_server_manager.RegisterServer(bpcams_h); + g_server_manager.RegisterServer(bpcams_h); } /* Loop forever, servicing our services. */ diff --git a/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_service.hpp b/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_service.hpp index fb15dc540..234b92c91 100644 --- a/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_service.hpp +++ b/stratosphere/ams_mitm/source/bpc_mitm/bpc_ams_service.hpp @@ -13,26 +13,26 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ - #pragma once #include namespace ams::mitm::bpc { - class AtmosphereService final : public sf::IServiceObject { - private: - enum class CommandId { - RebootToFatalError = 65000, - SetRebootPayload = 65001, - }; - private: + namespace impl { + + #define AMS_BPC_MITM_ATMOSPHERE_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 65000, void, RebootToFatalError, (const ams::FatalErrorContext &ctx)) \ + AMS_SF_METHOD_INFO(C, H, 65001, void, SetRebootPayload, (const ams::sf::InBuffer &payload)) + + AMS_SF_DEFINE_INTERFACE(IAtmosphereInterface, AMS_BPC_MITM_ATMOSPHERE_INTERFACE_INTERFACE_INFO) + + } + + class AtmosphereService final { + public: void RebootToFatalError(const ams::FatalErrorContext &ctx); void SetRebootPayload(const ams::sf::InBuffer &payload); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(RebootToFatalError), - MAKE_SERVICE_COMMAND_META(SetRebootPayload), - }; }; + static_assert(impl::IsIAtmosphereInterface); } diff --git a/stratosphere/ams_mitm/source/bpc_mitm/bpc_mitm_service.hpp b/stratosphere/ams_mitm/source/bpc_mitm/bpc_mitm_service.hpp index b4bd1d4bd..de88296f5 100644 --- a/stratosphere/ams_mitm/source/bpc_mitm/bpc_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/bpc_mitm/bpc_mitm_service.hpp @@ -18,12 +18,19 @@ namespace ams::mitm::bpc { - class BpcMitmService : public sf::IMitmServiceObject { - private: - enum class CommandId { - ShutdownSystem = 0, - RebootSystem = 1, - }; + namespace impl { + + #define AMS_BPC_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, ShutdownSystem, ()) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, RebootSystem, ()) + + AMS_SF_DEFINE_MITM_INTERFACE(IBpcMitmInterface, AMS_BPC_MITM_INTERFACE_INFO) + + } + + class BpcMitmService : public sf::MitmServiceImplBase { + public: + using MitmServiceImplBase::MitmServiceImplBase; public: static bool ShouldMitm(const sm::MitmProcessInfo &client_info) { /* We will mitm: @@ -36,16 +43,10 @@ namespace ams::mitm::bpc { client_info.override_status.IsHbl(); } public: - SF_MITM_SERVICE_OBJECT_CTOR(BpcMitmService) { /* ... */ } - protected: /* Overridden commands. */ Result ShutdownSystem(); Result RebootSystem(); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(ShutdownSystem), - MAKE_SERVICE_COMMAND_META(RebootSystem), - }; }; + static_assert(impl::IsIBpcMitmInterface); } diff --git a/stratosphere/ams_mitm/source/bpc_mitm/bpcmitm_module.cpp b/stratosphere/ams_mitm/source/bpc_mitm/bpcmitm_module.cpp index 36f3934a0..1ea2b7eb9 100644 --- a/stratosphere/ams_mitm/source/bpc_mitm/bpcmitm_module.cpp +++ b/stratosphere/ams_mitm/source/bpc_mitm/bpcmitm_module.cpp @@ -39,7 +39,7 @@ namespace ams::mitm::bpc { /* Create bpc mitm. */ const sm::ServiceName service_name = (hos::GetVersion() >= hos::Version_2_0_0) ? MitmServiceName : DeprecatedMitmServiceName; - R_ABORT_UNLESS(g_server_manager.RegisterMitmServer(service_name)); + R_ABORT_UNLESS((g_server_manager.RegisterMitmServer(service_name))); /* Loop forever, servicing our services. 
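The bpc conversion above is the template for the rest of this patch: the hand-rolled CommandId enums and DEFINE_SERVICE_DISPATCH_TABLE blocks are replaced by an AMS_SF_METHOD_INFO list passed to AMS_SF_DEFINE_INTERFACE or AMS_SF_DEFINE_MITM_INTERFACE, mitm implementations derive from sf::MitmServiceImplBase, and a static_assert checks the class against the generated IsI*Interface concept. A condensed sketch of that shape for a hypothetical service (every name here is invented for illustration, and the sketch assumes the libstratosphere sf framework headers):

    #define AMS_EXAMPLE_MITM_INTERFACE_INFO(C, H)                              \
        AMS_SF_METHOD_INFO(C, H, 0, Result, DoSomething,  ())                  \
        AMS_SF_METHOD_INFO(C, H, 1, Result, GetSomeValue, (sf::Out<u32> out))

    AMS_SF_DEFINE_MITM_INTERFACE(IExampleMitmInterface, AMS_EXAMPLE_MITM_INTERFACE_INFO)

    class ExampleMitmService : public sf::MitmServiceImplBase {
        public:
            using MitmServiceImplBase::MitmServiceImplBase;
        public:
            static bool ShouldMitm(const sm::MitmProcessInfo &client_info) { return false; }
        public:
            Result DoSomething();
            Result GetSomeValue(sf::Out<u32> out);
    };
    static_assert(IsIExampleMitmInterface<ExampleMitmService>); // concept check, mirroring the asserts added above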
*/ g_server_manager.LoopProcess(); diff --git a/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.cpp b/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.cpp index 60aec454e..5a3525fcc 100644 --- a/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.cpp +++ b/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.cpp @@ -35,9 +35,9 @@ namespace ams::mitm::fs { os::Mutex g_data_storage_lock(false); os::Mutex g_storage_cache_lock(false); - std::unordered_map> g_storage_cache; + std::unordered_map> g_storage_cache; - std::shared_ptr GetStorageCacheEntry(ncm::ProgramId program_id) { + std::shared_ptr GetStorageCacheEntry(ncm::ProgramId program_id) { std::scoped_lock lk(g_storage_cache_lock); auto it = g_storage_cache.find(static_cast(program_id)); @@ -48,7 +48,7 @@ namespace ams::mitm::fs { return it->second.lock(); } - void SetStorageCacheEntry(ncm::ProgramId program_id, std::shared_ptr *new_intf) { + void SetStorageCacheEntry(ncm::ProgramId program_id, std::shared_ptr *new_intf) { std::scoped_lock lk(g_storage_cache_lock); auto it = g_storage_cache.find(static_cast(program_id)); @@ -69,7 +69,17 @@ namespace ams::mitm::fs { return (tmp != 0); } - Result OpenHblWebContentFileSystem(sf::Out> &out, ncm::ProgramId client_program_id, ncm::ProgramId program_id, FsFileSystemType filesystem_type) { + template + constexpr ALWAYS_INLINE auto MakeSharedFileSystem(Arguments &&... args) { + return sf::MakeShared(std::forward(args)...); + } + + template + constexpr ALWAYS_INLINE auto MakeSharedStorage(Arguments &&... args) { + return sf::MakeShared(std::forward(args)...); + } + + Result OpenHblWebContentFileSystem(sf::Out> &out, ncm::ProgramId client_program_id, ncm::ProgramId program_id, FsFileSystemType filesystem_type) { /* Verify eligibility. */ bool is_hbl; R_UNLESS(R_SUCCEEDED(pm::info::IsHblProgramId(&is_hbl, program_id)), sm::mitm::ResultShouldForwardToSession()); @@ -88,11 +98,11 @@ namespace ams::mitm::fs { const sf::cmif::DomainObjectId target_object_id{serviceGetObjectId(&sd_fs.s)}; std::unique_ptr sd_ifs = std::make_unique(sd_fs); - out.SetValue(std::make_shared(std::make_shared(std::make_unique(std::move(sd_ifs), AtmosphereHblWebContentDir)), false), target_object_id); + out.SetValue(MakeSharedFileSystem(std::make_shared(std::make_unique(std::move(sd_ifs), AtmosphereHblWebContentDir)), false), target_object_id); return ResultSuccess(); } - Result OpenProgramSpecificWebContentFileSystem(sf::Out> &out, ncm::ProgramId client_program_id, ncm::ProgramId program_id, FsFileSystemType filesystem_type, Service *fwd, const fssrv::sf::Path *path, bool with_id) { + Result OpenProgramSpecificWebContentFileSystem(sf::Out> &out, ncm::ProgramId client_program_id, ncm::ProgramId program_id, FsFileSystemType filesystem_type, Service *fwd, const fssrv::sf::Path *path, bool with_id) { /* Directory must exist. 
*/ { FsDir d; @@ -132,13 +142,13 @@ namespace ams::mitm::fs { new_fs = std::make_shared(std::move(subdir_fs)); } - out.SetValue(std::make_shared(std::move(new_fs), false), target_object_id); + out.SetValue(MakeSharedFileSystem(std::move(new_fs), false), target_object_id); } return ResultSuccess(); } - Result OpenWebContentFileSystem(sf::Out> &out, ncm::ProgramId client_program_id, ncm::ProgramId program_id, FsFileSystemType filesystem_type, Service *fwd, const fssrv::sf::Path *path, bool with_id, bool try_program_specific) { + Result OpenWebContentFileSystem(sf::Out> &out, ncm::ProgramId client_program_id, ncm::ProgramId program_id, FsFileSystemType filesystem_type, Service *fwd, const fssrv::sf::Path *path, bool with_id, bool try_program_specific) { /* Check first that we're a web applet opening web content. */ R_UNLESS(ncm::IsWebAppletId(client_program_id), sm::mitm::ResultShouldForwardToSession()); R_UNLESS(filesystem_type == FsFileSystemType_ContentManual, sm::mitm::ResultShouldForwardToSession()); @@ -155,15 +165,15 @@ namespace ams::mitm::fs { } - Result FsMitmService::OpenFileSystemWithPatch(sf::Out> out, ncm::ProgramId program_id, u32 _filesystem_type) { + Result FsMitmService::OpenFileSystemWithPatch(sf::Out> out, ncm::ProgramId program_id, u32 _filesystem_type) { return OpenWebContentFileSystem(out, this->client_info.program_id, program_id, static_cast(_filesystem_type), this->forward_service.get(), nullptr, false, this->client_info.override_status.IsProgramSpecific()); } - Result FsMitmService::OpenFileSystemWithId(sf::Out> out, const fssrv::sf::Path &path, ncm::ProgramId program_id, u32 _filesystem_type) { + Result FsMitmService::OpenFileSystemWithId(sf::Out> out, const fssrv::sf::Path &path, ncm::ProgramId program_id, u32 _filesystem_type) { return OpenWebContentFileSystem(out, this->client_info.program_id, program_id, static_cast(_filesystem_type), this->forward_service.get(), std::addressof(path), true, this->client_info.override_status.IsProgramSpecific()); } - Result FsMitmService::OpenSdCardFileSystem(sf::Out> out) { + Result FsMitmService::OpenSdCardFileSystem(sf::Out> out) { /* We only care about redirecting this for NS/emummc. */ R_UNLESS(this->client_info.program_id == ncm::SystemProgramId::Ns, sm::mitm::ResultShouldForwardToSession()); R_UNLESS(emummc::IsActive(), sm::mitm::ResultShouldForwardToSession()); @@ -175,11 +185,11 @@ namespace ams::mitm::fs { /* Return output filesystem. */ std::shared_ptr redir_fs = std::make_shared(std::make_shared(sd_fs), "/Nintendo", emummc::GetNintendoDirPath()); - out.SetValue(std::make_shared(std::move(redir_fs), false), target_object_id); + out.SetValue(MakeSharedFileSystem(std::move(redir_fs), false), target_object_id); return ResultSuccess(); } - Result FsMitmService::OpenSaveDataFileSystem(sf::Out> out, u8 _space_id, const fs::SaveDataAttribute &attribute) { + Result FsMitmService::OpenSaveDataFileSystem(sf::Out> out, u8 _space_id, const fs::SaveDataAttribute &attribute) { /* We only want to intercept saves for games, right now. */ const bool is_game_or_hbl = this->client_info.override_status.IsHbl() || ncm::IsApplicationId(this->client_info.program_id); R_UNLESS(is_game_or_hbl, sm::mitm::ResultShouldForwardToSession()); @@ -240,11 +250,11 @@ namespace ams::mitm::fs { } /* Set output. 
*/ - out.SetValue(std::make_shared(std::move(dirsave_ifs), false), target_object_id); + out.SetValue(MakeSharedFileSystem(std::move(dirsave_ifs), false), target_object_id); return ResultSuccess(); } - Result FsMitmService::OpenBisStorage(sf::Out> out, u32 _bis_partition_id) { + Result FsMitmService::OpenBisStorage(sf::Out> out, u32 _bis_partition_id) { const ::FsBisPartitionId bis_partition_id = static_cast<::FsBisPartitionId>(_bis_partition_id); /* Try to open a storage for the partition. */ @@ -265,23 +275,23 @@ namespace ams::mitm::fs { /* Set output storage. */ if (bis_partition_id == FsBisPartitionId_BootPartition1Root) { - out.SetValue(std::make_shared(new Boot0Storage(bis_storage, this->client_info)), target_object_id); + out.SetValue(MakeSharedStorage(new Boot0Storage(bis_storage, this->client_info)), target_object_id); } else if (bis_partition_id == FsBisPartitionId_CalibrationBinary) { - out.SetValue(std::make_shared(new CalibrationBinaryStorage(bis_storage, this->client_info)), target_object_id); + out.SetValue(MakeSharedStorage(new CalibrationBinaryStorage(bis_storage, this->client_info)), target_object_id); } else { if (can_write_bis || can_write_bis_for_choi_support) { /* We can write, so create a writable storage. */ - out.SetValue(std::make_shared(new RemoteStorage(bis_storage)), target_object_id); + out.SetValue(MakeSharedStorage(new RemoteStorage(bis_storage)), target_object_id); } else { /* We can only read, so create a readable storage. */ - out.SetValue(std::make_shared(new ReadOnlyStorageAdapter(new RemoteStorage(bis_storage))), target_object_id); + out.SetValue(MakeSharedStorage(new ReadOnlyStorageAdapter(new RemoteStorage(bis_storage))), target_object_id); } } return ResultSuccess(); } - Result FsMitmService::OpenDataStorageByCurrentProcess(sf::Out> out) { + Result FsMitmService::OpenDataStorageByCurrentProcess(sf::Out> out) { /* Only mitm if we should override contents for the current process. */ R_UNLESS(this->client_info.override_status.IsProgramSpecific(), sm::mitm::ResultShouldForwardToSession()); @@ -298,7 +308,7 @@ namespace ams::mitm::fs { /* Try to get a storage from the cache. */ { - std::shared_ptr cached_storage = GetStorageCacheEntry(this->client_info.program_id); + std::shared_ptr cached_storage = GetStorageCacheEntry(this->client_info.program_id); if (cached_storage != nullptr) { out.SetValue(std::move(cached_storage), target_object_id); return ResultSuccess(); @@ -307,18 +317,18 @@ namespace ams::mitm::fs { /* Make a new layered romfs, and cache to storage. */ { - std::shared_ptr new_storage_intf = nullptr; + std::shared_ptr new_storage_intf = nullptr; /* Create the layered storage. 
*/ FsFile data_file; if (R_SUCCEEDED(OpenAtmosphereSdFile(&data_file, this->client_info.program_id, "romfs.bin", OpenMode_Read))) { auto layered_storage = std::make_shared(std::make_unique(new RemoteStorage(data_storage)), std::make_unique(new FileStorage(new RemoteFile(data_file))), this->client_info.program_id); layered_storage->BeginInitialize(); - new_storage_intf = std::make_shared(layered_storage); + new_storage_intf = MakeSharedStorage(layered_storage); } else { auto layered_storage = std::make_shared(std::make_unique(new RemoteStorage(data_storage)), nullptr, this->client_info.program_id); layered_storage->BeginInitialize(); - new_storage_intf = std::make_shared(layered_storage); + new_storage_intf = MakeSharedStorage(layered_storage); } SetStorageCacheEntry(this->client_info.program_id, &new_storage_intf); @@ -328,7 +338,7 @@ namespace ams::mitm::fs { return ResultSuccess(); } - Result FsMitmService::OpenDataStorageByDataId(sf::Out> out, ncm::DataId _data_id, u8 storage_id) { + Result FsMitmService::OpenDataStorageByDataId(sf::Out> out, ncm::DataId _data_id, u8 storage_id) { /* Only mitm if we should override contents for the current process. */ R_UNLESS(this->client_info.override_status.IsProgramSpecific(), sm::mitm::ResultShouldForwardToSession()); @@ -348,7 +358,7 @@ namespace ams::mitm::fs { /* Try to get a storage from the cache. */ { - std::shared_ptr cached_storage = GetStorageCacheEntry(data_id); + std::shared_ptr cached_storage = GetStorageCacheEntry(data_id); if (cached_storage != nullptr) { out.SetValue(std::move(cached_storage), target_object_id); return ResultSuccess(); @@ -357,18 +367,18 @@ namespace ams::mitm::fs { /* Make a new layered romfs, and cache to storage. */ { - std::shared_ptr new_storage_intf = nullptr; + std::shared_ptr new_storage_intf = nullptr; /* Create the layered storage. */ FsFile data_file; if (R_SUCCEEDED(OpenAtmosphereSdFile(&data_file, data_id, "romfs.bin", OpenMode_Read))) { auto layered_storage = std::make_shared(std::make_unique(new RemoteStorage(data_storage)), std::make_unique(new FileStorage(new RemoteFile(data_file))), data_id); layered_storage->BeginInitialize(); - new_storage_intf = std::make_shared(layered_storage); + new_storage_intf = MakeSharedStorage(layered_storage); } else { auto layered_storage = std::make_shared(std::make_unique(new RemoteStorage(data_storage)), nullptr, data_id); layered_storage->BeginInitialize(); - new_storage_intf = std::make_shared(layered_storage); + new_storage_intf = MakeSharedStorage(layered_storage); } SetStorageCacheEntry(data_id, &new_storage_intf); diff --git a/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.hpp b/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.hpp index 6ac10a225..d674b197a 100644 --- a/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/fs_mitm/fs_mitm_service.hpp @@ -20,30 +20,26 @@ namespace ams::mitm::fs { - using IStorageInterface = ams::fssrv::impl::StorageInterfaceAdapter; - using IFileSystemInterface = ams::fssrv::impl::FileSystemInterfaceAdapter; + namespace { - /* TODO: Consider re-enabling the mitm flag logic. 
*/ + #define AMS_FS_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, OpenFileSystemWithPatch, (sf::Out> out, ncm::ProgramId program_id, u32 _filesystem_type), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 8, Result, OpenFileSystemWithId, (sf::Out> out, const fssrv::sf::Path &path, ncm::ProgramId program_id, u32 _filesystem_type), hos::Version_2_0_0) \ + AMS_SF_METHOD_INFO(C, H, 18, Result, OpenSdCardFileSystem, (sf::Out> out)) \ + AMS_SF_METHOD_INFO(C, H, 51, Result, OpenSaveDataFileSystem, (sf::Out> out, u8 space_id, const ams::fs::SaveDataAttribute &attribute)) \ + AMS_SF_METHOD_INFO(C, H, 12, Result, OpenBisStorage, (sf::Out> out, u32 bis_partition_id)) \ + AMS_SF_METHOD_INFO(C, H, 200, Result, OpenDataStorageByCurrentProcess, (sf::Out> out)) \ + AMS_SF_METHOD_INFO(C, H, 202, Result, OpenDataStorageByDataId, (sf::Out> out, ncm::DataId data_id, u8 storage_id)) - class FsMitmService : public sf::IMitmServiceObject { - private: - enum class CommandId { - OpenFileSystemDeprecated = 0, + AMS_SF_DEFINE_MITM_INTERFACE(IFsMitmInterface, AMS_FS_MITM_INTERFACE_INFO) - SetCurrentProcess = 1, - OpenFileSystemWithPatch = 7, - OpenFileSystemWithId = 8, + } - OpenSdCardFileSystem = 18, - - OpenSaveDataFileSystem = 51, - - OpenBisStorage = 12, - OpenDataStorageByCurrentProcess = 200, - OpenDataStorageByDataId = 202, - }; + class FsMitmService : public sf::MitmServiceImplBase { public: - NX_CONSTEXPR bool ShouldMitmProgramId(const ncm::ProgramId program_id) { + using MitmServiceImplBase::MitmServiceImplBase; + public: + static constexpr ALWAYS_INLINE bool ShouldMitmProgramId(const ncm::ProgramId program_id) { /* We want to mitm everything that isn't a system-module. */ if (!ncm::IsSystemProgramId(program_id)) { return true; @@ -81,26 +77,14 @@ namespace ams::mitm::fs { return has_launched_qlaunch || ShouldMitmProgramId(client_info.program_id); } public: - SF_MITM_SERVICE_OBJECT_CTOR(FsMitmService) { /* ... */ } - protected: /* Overridden commands. 
*/ - Result OpenFileSystemWithPatch(sf::Out> out, ncm::ProgramId program_id, u32 _filesystem_type); - Result OpenFileSystemWithId(sf::Out> out, const fssrv::sf::Path &path, ncm::ProgramId program_id, u32 _filesystem_type); - Result OpenSdCardFileSystem(sf::Out> out); - Result OpenSaveDataFileSystem(sf::Out> out, u8 space_id, const ams::fs::SaveDataAttribute &attribute); - Result OpenBisStorage(sf::Out> out, u32 bis_partition_id); - Result OpenDataStorageByCurrentProcess(sf::Out> out); - Result OpenDataStorageByDataId(sf::Out> out, ncm::DataId data_id, u8 storage_id); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(OpenFileSystemWithPatch, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(OpenFileSystemWithId, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(OpenSdCardFileSystem), - MAKE_SERVICE_COMMAND_META(OpenSaveDataFileSystem), - MAKE_SERVICE_COMMAND_META(OpenBisStorage), - MAKE_SERVICE_COMMAND_META(OpenDataStorageByCurrentProcess), - MAKE_SERVICE_COMMAND_META(OpenDataStorageByDataId), - }; + Result OpenFileSystemWithPatch(sf::Out> out, ncm::ProgramId program_id, u32 _filesystem_type); + Result OpenFileSystemWithId(sf::Out> out, const fssrv::sf::Path &path, ncm::ProgramId program_id, u32 _filesystem_type); + Result OpenSdCardFileSystem(sf::Out> out); + Result OpenSaveDataFileSystem(sf::Out> out, u8 space_id, const ams::fs::SaveDataAttribute &attribute); + Result OpenBisStorage(sf::Out> out, u32 bis_partition_id); + Result OpenDataStorageByCurrentProcess(sf::Out> out); + Result OpenDataStorageByDataId(sf::Out> out, ncm::DataId data_id, u8 storage_id); }; } diff --git a/stratosphere/ams_mitm/source/fs_mitm/fsmitm_module.cpp b/stratosphere/ams_mitm/source/fs_mitm/fsmitm_module.cpp index a8a709287..ad0894d5c 100644 --- a/stratosphere/ams_mitm/source/fs_mitm/fsmitm_module.cpp +++ b/stratosphere/ams_mitm/source/fs_mitm/fsmitm_module.cpp @@ -78,7 +78,7 @@ namespace ams::mitm::fs { void MitmModule::ThreadFunction(void *arg) { /* Create fs mitm. */ - R_ABORT_UNLESS(g_server_manager.RegisterMitmServer(MitmServiceName)); + R_ABORT_UNLESS((g_server_manager.RegisterMitmServer(MitmServiceName))); /* Process for the server. */ ProcessForServerOnAllThreads(); diff --git a/stratosphere/ams_mitm/source/hid_mitm/hid_mitm_service.hpp b/stratosphere/ams_mitm/source/hid_mitm/hid_mitm_service.hpp index 71002d532..53beaf305 100644 --- a/stratosphere/ams_mitm/source/hid_mitm/hid_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/hid_mitm/hid_mitm_service.hpp @@ -19,11 +19,18 @@ namespace ams::mitm::hid { - class HidMitmService : public sf::IMitmServiceObject { - private: - enum class CommandId { - SetSupportedNpadStyleSet = 100, - }; + namespace { + + #define AMS_HID_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 100, Result, SetSupportedNpadStyleSet, (const sf::ClientAppletResourceUserId &client_aruid, u32 style_set)) + + AMS_SF_DEFINE_MITM_INTERFACE(IHidMitmInterface, AMS_HID_MITM_INTERFACE_INFO) + + } + + class HidMitmService : public sf::MitmServiceImplBase { + public: + using MitmServiceImplBase::MitmServiceImplBase; public: static bool ShouldMitm(const sm::MitmProcessInfo &client_info) { /* TODO: Remove in Atmosphere 0.10.2. */ @@ -33,14 +40,9 @@ namespace ams::mitm::hid { return client_info.override_status.IsHbl(); } public: - SF_MITM_SERVICE_OBJECT_CTOR(HidMitmService) { /* ... */ } - protected: /* Overridden commands. 
*/ Result SetSupportedNpadStyleSet(const sf::ClientAppletResourceUserId &client_aruid, u32 style_set); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(SetSupportedNpadStyleSet), - }; }; + static_assert(IsIHidMitmInterface); } diff --git a/stratosphere/ams_mitm/source/hid_mitm/hidmitm_module.cpp b/stratosphere/ams_mitm/source/hid_mitm/hidmitm_module.cpp index c40d39b56..583095376 100644 --- a/stratosphere/ams_mitm/source/hid_mitm/hidmitm_module.cpp +++ b/stratosphere/ams_mitm/source/hid_mitm/hidmitm_module.cpp @@ -59,7 +59,7 @@ namespace ams::mitm::hid { } /* Create hid mitm. */ - R_ABORT_UNLESS(g_server_manager.RegisterMitmServer(MitmServiceName)); + R_ABORT_UNLESS((g_server_manager.RegisterMitmServer(MitmServiceName))); /* Loop forever, servicing our services. */ g_server_manager.LoopProcess(); diff --git a/stratosphere/ams_mitm/source/ns_mitm/ns_am_mitm_service.hpp b/stratosphere/ams_mitm/source/ns_mitm/ns_am_mitm_service.hpp index 085e6097b..f84f3f6c0 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/ns_am_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/ns_mitm/ns_am_mitm_service.hpp @@ -18,13 +18,20 @@ namespace ams::mitm::ns { - class NsAmMitmService : public sf::IMitmServiceObject { - private: - enum class CommandId { - GetApplicationContentPath = 21, - ResolveApplicationContentPath = 23, - GetRunningApplicationProgramId = 92, - }; + namespace impl { + + #define AMS_NS_AM_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 21, Result, GetApplicationContentPath, (const sf::OutBuffer &out_path, ncm::ProgramId application_id, u8 content_type)) \ + AMS_SF_METHOD_INFO(C, H, 23, Result, ResolveApplicationContentPath, (ncm::ProgramId application_id, u8 content_type)) \ + AMS_SF_METHOD_INFO(C, H, 92, Result, GetRunningApplicationProgramId, (sf::Out out, ncm::ProgramId application_id), hos::Version_6_0_0) + + AMS_SF_DEFINE_MITM_INTERFACE(IAmMitmInterface, AMS_NS_AM_MITM_INTERFACE_INFO) + + } + + class NsAmMitmService : public sf::MitmServiceImplBase { + public: + using MitmServiceImplBase::MitmServiceImplBase; public: static bool ShouldMitm(const sm::MitmProcessInfo &client_info) { /* We will mitm: @@ -33,18 +40,11 @@ namespace ams::mitm::ns { return ncm::IsWebAppletId(client_info.program_id); } public: - SF_MITM_SERVICE_OBJECT_CTOR(NsAmMitmService) { /* ... */ } - protected: /* Actual command API. 
*/ Result GetApplicationContentPath(const sf::OutBuffer &out_path, ncm::ProgramId application_id, u8 content_type); Result ResolveApplicationContentPath(ncm::ProgramId application_id, u8 content_type); Result GetRunningApplicationProgramId(sf::Out out, ncm::ProgramId application_id); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetApplicationContentPath), - MAKE_SERVICE_COMMAND_META(ResolveApplicationContentPath), - MAKE_SERVICE_COMMAND_META(GetRunningApplicationProgramId, hos::Version_6_0_0), - }; }; + static_assert(impl::IsIAmMitmInterface); } diff --git a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp index 04a46e401..f364b5401 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp +++ b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.cpp @@ -37,13 +37,13 @@ namespace ams::mitm::ns { return nswebGetRunningApplicationProgramId(this->srv.get(), reinterpret_cast(out.GetPointer()), static_cast(application_id)); } - Result NsWebMitmService::GetDocumentInterface(sf::Out> out) { + Result NsWebMitmService::GetDocumentInterface(sf::Out> out) { /* Open a document interface. */ NsDocumentInterface doc; R_TRY(nsGetDocumentInterfaceFwd(this->forward_service.get(), &doc)); const sf::cmif::DomainObjectId target_object_id{serviceGetObjectId(&doc.s)}; - out.SetValue(std::make_shared(this->client_info, std::make_unique(doc)), target_object_id); + out.SetValue(sf::MakeShared(this->client_info, std::make_unique(doc)), target_object_id); return ResultSuccess(); } diff --git a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp index d525927a6..8f7574ee8 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/ns_mitm/ns_web_mitm_service.hpp @@ -20,13 +20,23 @@ namespace ams::mitm::ns { - class NsDocumentService : public sf::IServiceObject { - private: - enum class CommandId { - GetApplicationContentPath = 21, - ResolveApplicationContentPath = 23, - GetRunningApplicationProgramId = 92, - }; + namespace impl { + + #define AMS_NS_DOCUMENT_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 21, Result, GetApplicationContentPath, (const sf::OutBuffer &out_path, ncm::ProgramId application_id, u8 content_type)) \ + AMS_SF_METHOD_INFO(C, H, 23, Result, ResolveApplicationContentPath, (ncm::ProgramId application_id, u8 content_type)) \ + AMS_SF_METHOD_INFO(C, H, 92, Result, GetRunningApplicationProgramId, (sf::Out out, ncm::ProgramId application_id), hos::Version_6_0_0) + + AMS_SF_DEFINE_INTERFACE(IDocumentInterface, AMS_NS_DOCUMENT_MITM_INTERFACE_INFO) + + #define AMS_NS_WEB_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 7999, Result, GetDocumentInterface, (sf::Out> out)) + + AMS_SF_DEFINE_MITM_INTERFACE(IWebMitmInterface, AMS_NS_WEB_MITM_INTERFACE_INFO) + + } + + class NsDocumentService { private: sm::MitmProcessInfo client_info; std::unique_ptr<::NsDocumentInterface> srv; @@ -36,24 +46,17 @@ namespace ams::mitm::ns { virtual ~NsDocumentService() { nsDocumentInterfaceClose(this->srv.get()); } - protected: + public: /* Actual command API. 
*/ Result GetApplicationContentPath(const sf::OutBuffer &out_path, ncm::ProgramId application_id, u8 content_type); Result ResolveApplicationContentPath(ncm::ProgramId application_id, u8 content_type); Result GetRunningApplicationProgramId(sf::Out out, ncm::ProgramId application_id); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetApplicationContentPath), - MAKE_SERVICE_COMMAND_META(ResolveApplicationContentPath), - MAKE_SERVICE_COMMAND_META(GetRunningApplicationProgramId, hos::Version_6_0_0), - }; }; + static_assert(impl::IsIDocumentInterface); - class NsWebMitmService : public sf::IMitmServiceObject { - private: - enum class CommandId { - GetDocumentInterface = 7999, - }; + class NsWebMitmService : public sf::MitmServiceImplBase { + public: + using MitmServiceImplBase::MitmServiceImplBase; public: static bool ShouldMitm(const sm::MitmProcessInfo &client_info) { /* We will mitm: @@ -62,13 +65,8 @@ namespace ams::mitm::ns { return ncm::IsWebAppletId(client_info.program_id); } public: - SF_MITM_SERVICE_OBJECT_CTOR(NsWebMitmService) { /* ... */ } - protected: - Result GetDocumentInterface(sf::Out> out); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetDocumentInterface), - }; + Result GetDocumentInterface(sf::Out> out); }; + static_assert(impl::IsIWebMitmInterface); } diff --git a/stratosphere/ams_mitm/source/ns_mitm/nsmitm_module.cpp b/stratosphere/ams_mitm/source/ns_mitm/nsmitm_module.cpp index 33db54be5..4a7a708ac 100644 --- a/stratosphere/ams_mitm/source/ns_mitm/nsmitm_module.cpp +++ b/stratosphere/ams_mitm/source/ns_mitm/nsmitm_module.cpp @@ -39,9 +39,9 @@ namespace ams::mitm::ns { /* Create mitm servers. */ if (hos::GetVersion() < hos::Version_3_0_0) { - R_ABORT_UNLESS(g_server_manager.RegisterMitmServer(NsAmMitmServiceName)); + R_ABORT_UNLESS((g_server_manager.RegisterMitmServer(NsAmMitmServiceName))); } else { - R_ABORT_UNLESS(g_server_manager.RegisterMitmServer(NsWebMitmServiceName)); + R_ABORT_UNLESS((g_server_manager.RegisterMitmServer(NsWebMitmServiceName))); } /* Loop forever, servicing our services. 
*/ diff --git a/stratosphere/ams_mitm/source/set_mitm/set_mitm_service.hpp b/stratosphere/ams_mitm/source/set_mitm/set_mitm_service.hpp index 7c5f3f1d4..fe57f425f 100644 --- a/stratosphere/ams_mitm/source/set_mitm/set_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/set_mitm/set_mitm_service.hpp @@ -18,16 +18,23 @@ namespace ams::mitm::settings { - class SetMitmService : public sf::IMitmServiceObject { - private: - enum class CommandId { - GetLanguageCode = 0, - GetRegionCode = 4, - }; + namespace { + + #define AMS_SETTINGS_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetLanguageCode, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GetRegionCode, (sf::Out out)) + + AMS_SF_DEFINE_MITM_INTERFACE(ISetMitmInterface, AMS_SETTINGS_MITM_INTERFACE_INFO) + + } + + class SetMitmService : public sf::MitmServiceImplBase { private: os::Mutex lock{false}; cfg::OverrideLocale locale; - bool got_locale; + bool got_locale = false; + public: + using MitmServiceImplBase::MitmServiceImplBase; public: static bool ShouldMitm(const sm::MitmProcessInfo &client_info) { /* We will mitm: @@ -36,20 +43,12 @@ namespace ams::mitm::settings { const bool is_game = (ncm::IsApplicationId(client_info.program_id) && !client_info.override_status.IsHbl()); return client_info.program_id == ncm::SystemProgramId::Ns || is_game; } - public: - SF_MITM_SERVICE_OBJECT_CTOR(SetMitmService) { - this->got_locale = false; - } private: Result EnsureLocale(); - protected: + public: Result GetLanguageCode(sf::Out out); Result GetRegionCode(sf::Out out); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetLanguageCode), - MAKE_SERVICE_COMMAND_META(GetRegionCode), - }; }; + static_assert(IsISetMitmInterface); } diff --git a/stratosphere/ams_mitm/source/set_mitm/setmitm_module.cpp b/stratosphere/ams_mitm/source/set_mitm/setmitm_module.cpp index 26637a092..787e285e8 100644 --- a/stratosphere/ams_mitm/source/set_mitm/setmitm_module.cpp +++ b/stratosphere/ams_mitm/source/set_mitm/setmitm_module.cpp @@ -43,8 +43,8 @@ namespace ams::mitm::settings { mitm::WaitInitialized(); /* Create mitm servers. */ - R_ABORT_UNLESS(g_server_manager.RegisterMitmServer(SetMitmServiceName)); - R_ABORT_UNLESS(g_server_manager.RegisterMitmServer(SetSysMitmServiceName)); + R_ABORT_UNLESS((g_server_manager.RegisterMitmServer(SetMitmServiceName))); + R_ABORT_UNLESS((g_server_manager.RegisterMitmServer(SetSysMitmServiceName))); /* Loop forever, servicing our services. */ g_server_manager.LoopProcess(); diff --git a/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.cpp b/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.cpp index 130314ce0..7f21d3c28 100644 --- a/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.cpp +++ b/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.cpp @@ -56,15 +56,11 @@ namespace ams::mitm::settings { const auto api_info = exosphere::GetApiInfo(); const char emummc_char = emummc::IsActive() ? 'E' : 'S'; - /* GCC complains about the following snprintf possibly truncating, but this is not a problem and has been carefully accounted for. 
*/ - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wformat-truncation" - { - char display_version[sizeof(g_ams_firmware_version.display_version)]; - std::snprintf(display_version, sizeof(display_version), "%s|AMS %u.%u.%u|%c", g_ams_firmware_version.display_version, api_info.GetMajorVersion(), api_info.GetMinorVersion(), api_info.GetMicroVersion(), emummc_char); - std::memcpy(g_ams_firmware_version.display_version, display_version, sizeof(display_version)); - } - #pragma GCC diagnostic pop + /* NOTE: We have carefully accounted for the size of the string we print. */ + /* No truncation occurs assuming two-digits for all version number components. */ + char display_version[sizeof(g_ams_firmware_version.display_version)]; + std::snprintf(display_version, sizeof(display_version), "%s|AMS %u.%u.%u|%c", g_ams_firmware_version.display_version, api_info.GetMajorVersion(), api_info.GetMinorVersion(), api_info.GetMicroVersion(), emummc_char); + std::memcpy(g_ams_firmware_version.display_version, display_version, sizeof(display_version)); } g_cached_firmware_version = true; diff --git a/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.hpp b/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.hpp index 4dade3eb1..54bb22ecd 100644 --- a/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.hpp +++ b/stratosphere/ams_mitm/source/set_mitm/setsys_mitm_service.hpp @@ -18,15 +18,21 @@ namespace ams::mitm::settings { - class SetSysMitmService : public sf::IMitmServiceObject { - private: - enum class CommandId { - GetFirmwareVersion = 3, - GetFirmwareVersion2 = 4, + namespace { - GetSettingsItemValueSize = 37, - GetSettingsItemValue = 38, - }; + #define AMS_SETTINGS_SYSTEM_MITM_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, GetFirmwareVersion, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, GetFirmwareVersion2, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 37, Result, GetSettingsItemValueSize, (sf::Out out_size, const ams::settings::fwdbg::SettingsName &name, const ams::settings::fwdbg::SettingsItemKey &key)) \ + AMS_SF_METHOD_INFO(C, H, 38, Result, GetSettingsItemValue, (sf::Out out_size, const sf::OutBuffer &out, const ams::settings::fwdbg::SettingsName &name, const ams::settings::fwdbg::SettingsItemKey &key)) + + AMS_SF_DEFINE_MITM_INTERFACE(ISetSysMitmInterface, AMS_SETTINGS_SYSTEM_MITM_INTERFACE_INFO) + + } + + class SetSysMitmService : public sf::MitmServiceImplBase { + public: + using MitmServiceImplBase::MitmServiceImplBase; public: static bool ShouldMitm(const sm::MitmProcessInfo &client_info) { /* We will mitm: @@ -35,19 +41,11 @@ namespace ams::mitm::settings { return true; } public: - SF_MITM_SERVICE_OBJECT_CTOR(SetSysMitmService) { /* ... 
*/ } - protected: Result GetFirmwareVersion(sf::Out out); Result GetFirmwareVersion2(sf::Out out); Result GetSettingsItemValueSize(sf::Out out_size, const ams::settings::fwdbg::SettingsName &name, const ams::settings::fwdbg::SettingsItemKey &key); Result GetSettingsItemValue(sf::Out out_size, const sf::OutBuffer &out, const ams::settings::fwdbg::SettingsName &name, const ams::settings::fwdbg::SettingsItemKey &key); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetFirmwareVersion), - MAKE_SERVICE_COMMAND_META(GetFirmwareVersion2), - MAKE_SERVICE_COMMAND_META(GetSettingsItemValueSize), - MAKE_SERVICE_COMMAND_META(GetSettingsItemValue), - }; }; + static_assert(IsISetSysMitmInterface); } diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_apply_manager.cpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_apply_manager.cpp new file mode 100644 index 000000000..17591503a --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_apply_manager.cpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "sysupdater_apply_manager.hpp" + +namespace ams::mitm::sysupdater { + + namespace { + + alignas(os::MemoryPageSize) u8 g_boot_image_update_buffer[64_KB]; + + updater::BootImageUpdateType GetBootImageUpdateType() { + int boot_image_update_type; + auto size = settings::fwdbg::GetSettingsItemValue(std::addressof(boot_image_update_type), sizeof(boot_image_update_type), "systeminitializer", "boot_image_update_type"); + if (size != sizeof(boot_image_update_type)) { + return updater::BootImageUpdateType::Erista; + } + return updater::GetBootImageUpdateType(boot_image_update_type); + } + + Result MarkPreCommitForBootImages() { + /* Set verification required for both normal and safe mode. */ + R_TRY(updater::MarkVerifyingRequired(updater::BootModeType::Normal, g_boot_image_update_buffer, sizeof(g_boot_image_update_buffer))); + R_TRY(updater::MarkVerifyingRequired(updater::BootModeType::Safe, g_boot_image_update_buffer, sizeof(g_boot_image_update_buffer))); + + /* Pre-commit is now marked. */ + return ResultSuccess(); + } + + Result UpdateBootImages() { + /* Define a helper to update the images. */ + auto UpdateBootImageImpl = [](updater::BootModeType boot_mode, updater::BootImageUpdateType boot_image_update_type) -> Result { + /* Get the boot image package id. */ + ncm::SystemDataId boot_image_package_id = {}; + R_TRY_CATCH(updater::GetBootImagePackageId(std::addressof(boot_image_package_id), boot_mode, g_boot_image_update_buffer, sizeof(g_boot_image_update_buffer))) { + R_CATCH(updater::ResultBootImagePackageNotFound) { + /* Nintendo simply falls through when the package is not found. */ + } + } R_END_TRY_CATCH; + + + /* Update the boot images. 
*/ + R_TRY_CATCH(updater::UpdateBootImagesFromPackage(boot_image_package_id, boot_mode, g_boot_image_update_buffer, sizeof(g_boot_image_update_buffer), boot_image_update_type)) { + R_CATCH(updater::ResultBootImagePackageNotFound) { + /* Nintendo simply falls through when the package is not found. */ + } + } R_END_TRY_CATCH; + + /* Mark the images verified. */ + R_TRY(updater::MarkVerified(boot_mode, g_boot_image_update_buffer, sizeof(g_boot_image_update_buffer))); + + /* The boot images are updated. */ + return ResultSuccess(); + }; + + /* Get the boot image update type. */ + auto boot_image_update_type = GetBootImageUpdateType(); + + /* Update boot images for safe mode. */ + R_TRY(UpdateBootImageImpl(updater::BootModeType::Safe, boot_image_update_type)); + + /* Update boot images for normal mode. */ + R_TRY(UpdateBootImageImpl(updater::BootModeType::Normal, boot_image_update_type)); + + /* Both sets of images are updated. */ + return ResultSuccess(); + } + + } + + Result SystemUpdateApplyManager::ApplyPackageTask(ncm::PackageSystemDowngradeTask *task) { + /* Lock the apply mutex. */ + std::scoped_lock lk(this->apply_mutex); + + /* NOTE: Here, Nintendo creates a system report for the update. */ + + /* Mark boot images to note that we're updating. */ + R_TRY(MarkPreCommitForBootImages()); + + /* Commit the task. */ + R_TRY(task->Commit()); + + /* Update the boot images. */ + R_TRY(UpdateBootImages()); + + return ResultSuccess(); + } + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_apply_manager.hpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_apply_manager.hpp new file mode 100644 index 000000000..d05bad8c4 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_apply_manager.hpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::mitm::sysupdater { + + class SystemUpdateApplyManager { + private: + os::Mutex apply_mutex; + public: + constexpr SystemUpdateApplyManager() : apply_mutex(false) { /* ... */ } + + Result ApplyPackageTask(ncm::PackageSystemDowngradeTask *task); + }; + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_impl.cpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_impl.cpp new file mode 100644 index 000000000..caebfc224 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_impl.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
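SystemUpdateApplyManager::ApplyPackageTask above pins down the ordering that matters: the boot images are flagged as requiring verification before the content commit, and they are only re-marked verified after UpdateBootImages has written the new images, so an interruption anywhere in between remains visible on the next boot. Condensed into one place (the wrapper name is illustrative; the helpers are the ones defined in this file):

    // Illustrative condensation of the flow in SystemUpdateApplyManager::ApplyPackageTask.
    Result ApplySystemUpdateSketch(ncm::PackageSystemDowngradeTask *task) {
        R_TRY(MarkPreCommitForBootImages()); // MarkVerifyingRequired for Normal and Safe mode
        R_TRY(task->Commit());               // commit the prepared content meta
        R_TRY(UpdateBootImages());           // rewrite the boot images, then MarkVerified
        return ResultSuccess();
    }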
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "sysupdater_async_impl.hpp" +#include "sysupdater_async_thread_allocator.hpp" + +namespace ams::mitm::sysupdater { + + Result AsyncBase::ToAsyncResult(Result result) { + R_TRY_CATCH(result) { + R_CONVERT(nim::ResultHttpConnectionCanceled, ns::ResultCanceled()); + R_CONVERT(ncm::ResultInstallTaskCancelled, ns::ResultCanceled()); + } R_END_TRY_CATCH; + + return ResultSuccess(); + } + + AsyncPrepareSdCardUpdateImpl::~AsyncPrepareSdCardUpdateImpl() { + if (this->thread_info) { + os::WaitThread(this->thread_info->thread); + os::DestroyThread(this->thread_info->thread); + GetAsyncThreadAllocator()->Free(*this->thread_info); + } + } + + Result AsyncPrepareSdCardUpdateImpl::Run() { + /* Get a thread info. */ + ThreadInfo info; + R_TRY(GetAsyncThreadAllocator()->Allocate(std::addressof(info))); + + /* Set the thread info's priority. */ + info.priority = AMS_GET_SYSTEM_THREAD_PRIORITY(mitm_sysupdater, AsyncPrepareSdCardUpdateTask); + + /* Ensure that we clean up appropriately. */ + ON_SCOPE_EXIT { + if (!this->thread_info) { + GetAsyncThreadAllocator()->Free(info); + } + }; + + /* Create a thread for the task. */ + R_TRY(os::CreateThread(info.thread, [](void *arg) { + auto *_this = reinterpret_cast(arg); + _this->result = _this->Execute(); + _this->event.Signal(); + }, this, info.stack, info.stack_size, info.priority)); + + /* Set the thread name. */ + os::SetThreadNamePointer(info.thread, AMS_GET_SYSTEM_THREAD_NAME(mitm_sysupdater, AsyncPrepareSdCardUpdateTask)); + + /* Start the thread. */ + os::StartThread(info.thread); + + /* Set our thread info. */ + this->thread_info = info; + return ResultSuccess(); + } + + Result AsyncPrepareSdCardUpdateImpl::Execute() { + return this->task->PrepareAndExecute(); + } + + void AsyncPrepareSdCardUpdateImpl::CancelImpl() { + this->task->Cancel(); + } + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_impl.hpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_impl.hpp new file mode 100644 index 000000000..ff062c3ed --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_impl.hpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include "sysupdater_thread_allocator.hpp" + +namespace ams::mitm::sysupdater { + + class ErrorContextHolder { + private: + err::ErrorContext error_context; + public: + constexpr ErrorContextHolder() : error_context{} { /* ... */ } + + virtual ~ErrorContextHolder() { /* ... 
*/ } + + template + Result SaveErrorContextIfFailed(T &async, Result result) { + if (R_FAILED(result)) { + async.GetErrorContext(std::addressof(this->error_context)); + return result; + } + + return ResultSuccess(); + } + + template + Result GetAndSaveErrorContext(T &async) { + R_TRY(this->SaveErrorContextIfFailed(async, async.Get())); + return ResultSuccess(); + } + + template + Result SaveInternalTaskErrorContextIfFailed(T &async, Result result) { + if (R_FAILED(result)) { + async.CreateErrorContext(std::addressof(this->error_context)); + return result; + } + + return ResultSuccess(); + } + + const err::ErrorContext &GetErrorContextImpl() { + return this->error_context; + } + }; + + class AsyncBase { + public: + virtual ~AsyncBase() { /* ... */ } + + static Result ToAsyncResult(Result result); + + Result Cancel() { + this->CancelImpl(); + return ResultSuccess(); + } + + virtual Result GetErrorContext(sf::Out out) { + *out = {}; + return ResultSuccess(); + } + private: + virtual void CancelImpl() = 0; + }; + + class AsyncResultBase : public AsyncBase { + public: + virtual ~AsyncResultBase() { /* ... */ } + + Result Get() { + return ToAsyncResult(this->GetImpl()); + } + private: + virtual Result GetImpl() = 0; + }; + static_assert(ns::impl::IsIAsyncResult); + + /* NOTE: Based off of ns AsyncPrepareCardUpdateImpl. */ + /* We don't implement the RequestServer::ManagedStop details, as we don't implement stoppable request list. */ + class AsyncPrepareSdCardUpdateImpl : public AsyncResultBase, private ErrorContextHolder { + private: + Result result; + os::SystemEvent event; + std::optional thread_info; + ncm::InstallTaskBase *task; + public: + AsyncPrepareSdCardUpdateImpl(ncm::InstallTaskBase *task) : result(ResultSuccess()), event(os::EventClearMode_ManualClear, true), thread_info(), task(task) { /* ... */ } + virtual ~AsyncPrepareSdCardUpdateImpl(); + + os::SystemEvent &GetEvent() { return this->event; } + + virtual Result GetErrorContext(sf::Out out) override { + *out = ErrorContextHolder::GetErrorContextImpl(); + return ResultSuccess(); + } + + Result Run(); + private: + Result Execute(); + + virtual void CancelImpl() override; + virtual Result GetImpl() override { return this->result; } + }; + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_thread_allocator.cpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_thread_allocator.cpp new file mode 100644 index 000000000..8b02fd81a --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_thread_allocator.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
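AsyncPrepareSdCardUpdateImpl above is the whole asynchronous surface for preparing the SD card update: Run() takes a thread from a fixed allocator, the thread body stores the Result of Execute() (which just calls task->PrepareAndExecute()) and signals the system event, Get() later returns the stored Result through ToAsyncResult, and Cancel() forwards to the install task. A stripped-down sketch of that run/signal/collect shape using only standard-library pieces (purely illustrative; the real code uses ams::os threads and events):

    #include <cstdio>
    #include <future>
    #include <thread>

    int main() {
        std::promise<int> result;                                 // stands in for this->result plus this->event
        std::future<int> done = result.get_future();
        std::thread worker([&result] { result.set_value(0); });   // like Execute() storing its Result and signalling
        // ... the caller keeps servicing requests, then waits for completion ...
        std::printf("task result: %d\n", done.get());
        worker.join();
        return 0;
    }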
+ */ +#include +#include "sysupdater_async_thread_allocator.hpp" + +namespace ams::mitm::sysupdater { + + namespace { + + constexpr inline int AsyncThreadCount = 1; + constexpr inline size_t AsyncThreadStackSize = 16_KB; + + os::ThreadType g_async_threads[AsyncThreadCount]; + alignas(os::ThreadStackAlignment) u8 g_async_thread_stack_heap[AsyncThreadCount * AsyncThreadStackSize]; + + constinit ThreadAllocator g_async_thread_allocator(g_async_threads, AsyncThreadCount, os::InvalidThreadPriority, g_async_thread_stack_heap, sizeof(g_async_thread_stack_heap), AsyncThreadStackSize); + + } + + ThreadAllocator *GetAsyncThreadAllocator() { + return std::addressof(g_async_thread_allocator); + } + + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_thread_allocator.hpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_thread_allocator.hpp new file mode 100644 index 000000000..b051689c6 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_async_thread_allocator.hpp @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include "sysupdater_thread_allocator.hpp" + +namespace ams::mitm::sysupdater { + + ThreadAllocator *GetAsyncThreadAllocator(); + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_fs_utils.cpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_fs_utils.cpp new file mode 100644 index 000000000..b8a88fc13 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_fs_utils.cpp @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "sysupdater_fs_utils.hpp" + +namespace ams::mitm::sysupdater { + + namespace { + + constexpr inline const char * const NcaExtension = ".nca"; + constexpr inline const char * const NspExtension = ".nsp"; + constexpr inline const size_t NcaExtensionSize = 4; + constexpr inline const size_t NspExtensionSize = 4; + + static_assert(NcaExtensionSize == NspExtensionSize); + constexpr inline const size_t NcaNspExtensionSize = NcaExtensionSize; + + constexpr inline std::underlying_type::type SdCardContentMetaPathNormalizeOption = fssrv::PathNormalizer::Option_PreserveTailSeparator | + fssrv::PathNormalizer::Option_HasMountName; + + Result CheckNcaOrNsp(const char **path) { + /* Ensure that the path is currently at the mount name delimeter. 
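At this point ParseMountName has already consumed the mount name, so the next character must be the ':' delimiter; anything else fails with fs::ResultPathNotFound.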
*/ + R_UNLESS(std::strncmp(*path, ams::fs::impl::MountNameDelimiter, strnlen(ams::fs::impl::MountNameDelimiter, ams::fs::EntryNameLengthMax)) == 0, fs::ResultPathNotFound()); + + /* Advance past the :. */ + static_assert(ams::fs::impl::MountNameDelimiter[0] == ':'); + *path += 1; + + /* Ensure path is long enough for the extension. */ + const auto path_len = strnlen(*path, ams::fs::EntryNameLengthMax); + R_UNLESS(path_len > NcaNspExtensionSize, fs::ResultPathNotFound()); + + /* Get the extension. */ + const char * const extension = *path + path_len - NcaNspExtensionSize; + + /* Ensure nca or nsp. */ + const bool is_nca = util::Strnicmp(extension, NcaExtension, NcaNspExtensionSize) == 0; + const bool is_nsp = util::Strnicmp(extension, NspExtension, NcaNspExtensionSize) == 0; + R_UNLESS(is_nca || is_nsp, fs::ResultPathNotFound()); + + return ResultSuccess(); + } + + Result ParseMountName(const char **path, std::shared_ptr *out) { + /* The equivalent function here supports all the common mount names; we'll only support the SD card, system content storage. */ + if (const auto mount_len = strnlen(ams::fs::impl::SdCardFileSystemMountName, ams::fs::MountNameLengthMax); std::strncmp(*path, ams::fs::impl::SdCardFileSystemMountName, mount_len) == 0) { + /* Advance the path. */ + *path += mount_len; + + /* Open the SD card. This uses libnx bindings. */ + FsFileSystem fs; + R_TRY(fsOpenSdCardFileSystem(std::addressof(fs))); + + /* Allocate a new filesystem wrapper. */ + auto fsa = std::make_shared(fs); + R_UNLESS(fsa != nullptr, fs::ResultAllocationFailureInSdCardA()); + + /* Set the output fs. */ + *out = std::move(fsa); + } else if (const auto mount_len = strnlen(ams::fs::impl::ContentStorageSystemMountName, ams::fs::MountNameLengthMax); std::strncmp(*path, ams::fs::impl::ContentStorageSystemMountName, mount_len) == 0) { + /* Advance the path. */ + *path += mount_len; + + /* Open the system content storage. This uses libnx bindings. */ + FsFileSystem fs; + R_TRY(fsOpenContentStorageFileSystem(std::addressof(fs), FsContentStorageId_System)); + + /* Allocate a new filesystem wrapper. */ + auto fsa = std::make_shared(fs); + R_UNLESS(fsa != nullptr, fs::ResultAllocationFailureInContentStorageA()); + + /* Set the output fs. */ + *out = std::move(fsa); + } else { + return fs::ResultPathNotFound(); + } + + /* Ensure that there's something that could be a mount name delimiter. */ + R_UNLESS(strnlen(*path, fs::EntryNameLengthMax) != 0, fs::ResultPathNotFound()); + + return ResultSuccess(); + } + + Result ParseNsp(const char **path, std::shared_ptr *out, std::shared_ptr base_fs) { + const char *work_path = *path; + + /* Advance to the nsp extension. */ + while (true) { + if (util::Strnicmp(work_path, NspExtension, NspExtensionSize) == 0) { + if (work_path[NspExtensionSize] == '\x00' || work_path[NspExtensionSize] == '/') { + break; + } + work_path += NspExtensionSize; + } else { + R_UNLESS(*work_path != '\x00', fs::ResultPathNotFound()); + + work_path += 1; + } + } + + /* Advance past the extension. */ + work_path += NspExtensionSize; + + /* Get the nsp path. */ + char nsp_path[fs::EntryNameLengthMax + 1]; + R_UNLESS(static_cast(work_path - *path) <= sizeof(nsp_path), fs::ResultTooLongPath()); + std::memcpy(nsp_path, *path, work_path - *path); + nsp_path[work_path - *path] = '\x00'; + + /* Open the file storage. 
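The .nsp is opened read-only via a file-storage-based filesystem so that a partition filesystem can be layered on top of it below.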
*/ + std::shared_ptr file_storage = fssystem::AllocateShared(); + R_UNLESS(file_storage != nullptr, fs::ResultAllocationFailureInFileSystemProxyCoreImplD()); + R_TRY(file_storage->Initialize(std::move(base_fs), nsp_path, ams::fs::OpenMode_Read)); + + /* Create a partition fs. */ + R_TRY(fssystem::GetFileSystemCreatorInterfaces()->partition_fs_creator->Create(out, std::move(file_storage))); + + /* Update the path. */ + *path = work_path; + + return ResultSuccess(); + } + + Result ParseNca(const char **path, std::shared_ptr *out, std::shared_ptr base_fs) { + /* Open the file storage. */ + std::shared_ptr file_storage = fssystem::AllocateShared(); + R_UNLESS(file_storage != nullptr, fs::ResultAllocationFailureInFileSystemProxyCoreImplE()); + R_TRY(file_storage->Initialize(std::move(base_fs), *path, ams::fs::OpenMode_Read)); + + /* Create the nca reader. */ + std::shared_ptr nca_reader; + R_TRY(fssystem::GetFileSystemCreatorInterfaces()->storage_on_nca_creator->CreateNcaReader(std::addressof(nca_reader), file_storage)); + + /* NOTE: Here Nintendo validates program ID, but this does not need checking in the meta case. */ + + /* Set output reader. */ + *out = std::move(nca_reader); + return ResultSuccess(); + } + + Result OpenMetaStorage(std::shared_ptr *out, std::shared_ptr nca_reader, fssystem::NcaFsHeader::FsType *out_fs_type) { + /* Ensure the nca is a meta nca. */ + R_UNLESS(nca_reader->GetContentType() == fssystem::NcaHeader::ContentType::Meta, fs::ResultPreconditionViolation()); + + /* We only support SD card ncas, so ensure this isn't a gamecard nca. */ + R_UNLESS(nca_reader->GetDistributionType() != fssystem::NcaHeader::DistributionType::GameCard, fs::ResultPermissionDenied()); + + /* Here Nintendo would call GetPartitionIndex(), but we don't need to, because it's meta. */ + constexpr int MetaPartitionIndex = 0; + + /* Open fs header reader. */ + fssystem::NcaFsHeaderReader fs_header_reader; + R_TRY(fssystem::GetFileSystemCreatorInterfaces()->storage_on_nca_creator->Create(out, std::addressof(fs_header_reader), std::move(nca_reader), MetaPartitionIndex, false)); + + /* Set the output fs type. */ + *out_fs_type = fs_header_reader.GetFsType(); + return ResultSuccess(); + } + + Result OpenContentMetaFileSystem(std::shared_ptr *out, const char *path) { + /* Parse the mount name to get a filesystem. */ + const char *cur_path = path; + std::shared_ptr base_fs; + R_TRY(ParseMountName(std::addressof(cur_path), std::addressof(base_fs))); + + /* Ensure the path is an nca or nsp. */ + R_TRY(CheckNcaOrNsp(std::addressof(cur_path))); + + /* Try to parse as nsp. */ + std::shared_ptr nsp_fs; + if (R_SUCCEEDED(ParseNsp(std::addressof(cur_path), std::addressof(nsp_fs), base_fs))) { + /* nsp target is only allowed for type package, and we're assuming type meta. */ + R_UNLESS(*path != '\x00', fs::ResultInvalidArgument()); + + /* Use the nsp fs as the base fs. */ + base_fs = std::move(nsp_fs); + } + + /* Parse as nca. */ + std::shared_ptr nca_reader; + R_TRY(ParseNca(std::addressof(cur_path), std::addressof(nca_reader), std::move(base_fs))); + + /* Open meta storage. */ + std::shared_ptr storage; + fssystem::NcaFsHeader::FsType fs_type; + R_TRY(OpenMetaStorage(std::addressof(storage), std::move(nca_reader), std::addressof(fs_type))); + + /* Open the appropriate interface. 
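A meta NCA's filesystem section is expected to be either a partition filesystem or a RomFS; any other section type is rejected as invalid.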
*/ + const auto * const creator_intfs = fssystem::GetFileSystemCreatorInterfaces(); + switch (fs_type) { + case fssystem::NcaFsHeader::FsType::PartitionFs: return creator_intfs->partition_fs_creator->Create(out, std::move(storage)); + case fssystem::NcaFsHeader::FsType::RomFs: return creator_intfs->rom_fs_creator->Create(out, std::move(storage)); + default: + return fs::ResultInvalidNcaFileSystemType(); + } + } + + } + + bool PathView::HasPrefix(std::string_view prefix) const { + return this->path.compare(0, prefix.length(), prefix) == 0; + } + + bool PathView::HasSuffix(std::string_view suffix) const { + return this->path.compare(this->path.length() - suffix.length(), suffix.length(), suffix) == 0; + } + + std::string_view PathView::GetFileName() const { + auto pos = this->path.find_last_of("/"); + return pos != std::string_view::npos ? this->path.substr(pos + 1) : this->path; + } + + Result MountSdCardContentMeta(const char *mount_name, const char *path) { + /* Sanitize input. */ + /* NOTE: This is an internal API, so we won't bother with mount name sanitization. */ + R_UNLESS(path != nullptr, fs::ResultInvalidPath()); + + /* Normalize the path. */ + fssrv::PathNormalizer normalized_path(path, SdCardContentMetaPathNormalizeOption); + R_TRY(normalized_path.GetResult()); + + /* Open the filesystem. */ + std::shared_ptr fs; + R_TRY(OpenContentMetaFileSystem(std::addressof(fs), normalized_path.GetPath())); + + /* Create a holder for the fs. */ + std::unique_ptr unique_fs = std::make_unique(std::move(fs)); + R_UNLESS(unique_fs != nullptr, fs::ResultAllocationFailureInNew()); + + /* Register the fs. */ + return ams::fs::fsa::Register(mount_name, std::move(unique_fs)); + } + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_fs_utils.hpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_fs_utils.hpp new file mode 100644 index 000000000..43c094fe9 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_fs_utils.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::mitm::sysupdater { + + class PathView { + private: + std::string_view path; /* Nintendo uses util::string_view here. 
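std::string_view offers the same read-only prefix/suffix/filename operations this helper needs, so nothing is lost by the substitution.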
*/ + public: + PathView(std::string_view p) : path(p) { /* ...*/ } + bool HasPrefix(std::string_view prefix) const; + bool HasSuffix(std::string_view suffix) const; + std::string_view GetFileName() const; + }; + + Result MountSdCardContentMeta(const char *mount_name, const char *path); + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_module.cpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_module.cpp new file mode 100644 index 000000000..5ab7ff6e5 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_module.cpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "../amsmitm_initialization.hpp" +#include "sysupdater_module.hpp" +#include "sysupdater_service.hpp" +#include "sysupdater_async_impl.hpp" + +namespace ams::mitm::sysupdater { + + namespace { + + constexpr sm::ServiceName SystemUpdateServiceName = sm::ServiceName::Encode("ams:su"); + constexpr size_t SystemUpdateMaxSessions = 1; + + constexpr size_t MaxServers = 1; + constexpr size_t MaxSessions = SystemUpdateMaxSessions + 3; + + struct ServerOptions { + static constexpr size_t PointerBufferSize = 1_KB; + static constexpr size_t MaxDomains = 0; + static constexpr size_t MaxDomainObjects = 0; + }; + + sf::hipc::ServerManager g_server_manager; + + constinit sysupdater::SystemUpdateService g_system_update_service_object; + + } + + void MitmModule::ThreadFunction(void *arg) { + /* Wait until initialization is complete. */ + mitm::WaitInitialized(); + + /* Connect to nim. */ + sm::DoWithSession([]() { nim::InitializeForNetworkInstallManager(); }); + ON_SCOPE_EXIT { nim::FinalizeForNetworkInstallManager(); }; + + /* Register ams:su. */ + R_ABORT_UNLESS((g_server_manager.RegisterServer(SystemUpdateServiceName, SystemUpdateMaxSessions, sf::GetSharedPointerTo(g_system_update_service_object)))); + + /* Loop forever, servicing our services. */ + g_server_manager.LoopProcess(); + } + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_module.hpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_module.hpp new file mode 100644 index 000000000..caf8ac688 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_module.hpp @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include "../amsmitm_module.hpp" + +namespace ams::mitm::sysupdater { + + DEFINE_MITM_MODULE_CLASS(0x8000, AMS_GET_SYSTEM_THREAD_PRIORITY(mitm_sysupdater, IpcServer)); + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_service.cpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_service.cpp new file mode 100644 index 000000000..713a208c2 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_service.cpp @@ -0,0 +1,524 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "sysupdater_service.hpp" +#include "sysupdater_async_impl.hpp" +#include "sysupdater_fs_utils.hpp" + +namespace ams::mitm::sysupdater { + + namespace { + + /* ExFat NCAs prior to 2.0.0 do not actually include the exfat driver, and don't boot. */ + constexpr inline u32 MinimumVersionForExFatDriver = 65536; + + bool IsExFatDriverSupported(const ncm::ContentMetaInfo &info) { + return info.version >= MinimumVersionForExFatDriver && ((info.attributes & ncm::ContentMetaAttribute_IncludesExFatDriver) != 0); + } + + template + Result ForEachFileInDirectory(const char *root_path, F f) { + /* Open the directory. */ + fs::DirectoryHandle dir; + R_TRY(fs::OpenDirectory(std::addressof(dir), root_path, fs::OpenDirectoryMode_File)); + ON_SCOPE_EXIT { fs::CloseDirectory(dir); }; + + while (true) { + /* Read the current entry. */ + s64 count; + fs::DirectoryEntry entry; + R_TRY(fs::ReadDirectory(std::addressof(count), std::addressof(entry), dir, 1)); + if (count == 0) { + break; + } + + /* Invoke our handler on the entry. */ + bool done; + R_TRY(f(std::addressof(done), entry)); + R_SUCCEED_IF(done); + } + + return ResultSuccess(); + } + + Result ConvertToFsCommonPath(char *dst, size_t dst_size, const char *package_root_path, const char *entry_path) { + char package_path[ams::fs::EntryNameLengthMax]; + + const size_t path_len = std::snprintf(package_path, sizeof(package_path), "%s%s", package_root_path, entry_path); + AMS_ABORT_UNLESS(path_len < ams::fs::EntryNameLengthMax); + + return ams::fs::ConvertToFsCommonPath(dst, dst_size, package_path); + } + + Result LoadContentMeta(ncm::AutoBuffer *out, const char *package_root_path, const fs::DirectoryEntry &entry) { + AMS_ABORT_UNLESS(PathView(entry.name).HasSuffix(".cnmt.nca")); + + char path[ams::fs::EntryNameLengthMax]; + R_TRY(ConvertToFsCommonPath(path, sizeof(path), package_root_path, entry.name)); + + return ncm::ReadContentMetaPath(out, path); + } + + Result ReadContentMetaPath(ncm::AutoBuffer *out, const char *package_root, const ncm::ContentInfo &content_info) { + /* Get the .cnmt.nca path for the info. */ + char cnmt_nca_name[ncm::ContentIdStringLength + 10]; + ncm::GetStringFromContentId(cnmt_nca_name, sizeof(cnmt_nca_name), content_info.GetId()); + std::memcpy(cnmt_nca_name + ncm::ContentIdStringLength, ".cnmt.nca", std::strlen(".cnmt.nca")); + cnmt_nca_name[sizeof(cnmt_nca_name) - 1] = '\x00'; + + /* Create a new path. 
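The .cnmt.nca filename built above is joined with the package root and converted to an fs-common path that ncm::ReadContentMetaPath can open.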
*/ + ncm::Path content_path; + R_TRY(ConvertToFsCommonPath(content_path.str, sizeof(content_path.str), package_root, cnmt_nca_name)); + + /* Read the content meta path. */ + return ncm::ReadContentMetaPath(out, content_path.str); + } + + Result GetSystemUpdateUpdateContentInfoFromPackage(ncm::ContentInfo *out, const char *package_root) { + bool found_system_update = false; + + /* Iterate over all files to find the system update meta. */ + R_TRY(ForEachFileInDirectory(package_root, [&](bool *done, const fs::DirectoryEntry &entry) -> Result { + /* Don't early terminate by default. */ + *done = false; + + /* We have nothing to list if we're not looking at a meta. */ + R_SUCCEED_IF(!PathView(entry.name).HasSuffix(".cnmt.nca")); + + /* Read the content meta path, and build. */ + ncm::AutoBuffer package_meta; + R_TRY(LoadContentMeta(std::addressof(package_meta), package_root, entry)); + + /* Create a reader. */ + const auto reader = ncm::PackagedContentMetaReader(package_meta.Get(), package_meta.GetSize()); + + /* If we find a system update, we're potentially done. */ + if (reader.GetHeader()->type == ncm::ContentMetaType::SystemUpdate) { + /* Try to parse a content id from the name. */ + auto content_id = ncm::GetContentIdFromString(entry.name, sizeof(entry.name)); + R_UNLESS(content_id, ncm::ResultInvalidPackageFormat()); + + /* We're done. */ + *done = true; + found_system_update = true; + + *out = ncm::ContentInfo::Make(*content_id, entry.file_size, ncm::ContentType::Meta); + } + + return ResultSuccess(); + })); + + /* If we didn't find anything, error. */ + R_UNLESS(found_system_update, ncm::ResultSystemUpdateNotFoundInPackage()); + + return ResultSuccess(); + } + + Result ValidateSystemUpdate(Result *out_result, Result *out_exfat_result, UpdateValidationInfo *out_info, const ncm::PackagedContentMetaReader &update_reader, const char *package_root) { + /* Clear output. */ + *out_result = ResultSuccess(); + *out_exfat_result = ResultSuccess(); + + /* We want to track all content the update requires. */ + const size_t num_content_metas = update_reader.GetContentMetaCount(); + bool content_meta_valid[num_content_metas] = {}; + + /* Allocate a buffer to use for validation. */ + size_t data_buffer_size = 1_MB; + void *data_buffer; + do { + data_buffer = std::malloc(data_buffer_size); + if (data_buffer != nullptr) { + break; + } + + data_buffer_size /= 2; + } while (data_buffer_size >= 16_KB); + R_UNLESS(data_buffer != nullptr, fs::ResultAllocationFailureInNew()); + + ON_SCOPE_EXIT { std::free(data_buffer); }; + + /* Declare helper for result validation. */ + auto ValidateResult = [&] ALWAYS_INLINE_LAMBDA (Result result) -> Result { + *out_result = result; + return result; + }; + + /* Iterate over all files to find all content metas. */ + R_TRY(ForEachFileInDirectory(package_root, [&](bool *done, const fs::DirectoryEntry &entry) -> Result { + /* Clear output. */ + *out_info = {}; + + /* Don't early terminate by default. */ + *done = false; + + /* We have nothing to list if we're not looking at a meta. */ + R_SUCCEED_IF(!PathView(entry.name).HasSuffix(".cnmt.nca")); + + /* Read the content meta path, and build. */ + ncm::AutoBuffer package_meta; + R_TRY(LoadContentMeta(std::addressof(package_meta), package_root, entry)); + + /* Create a reader. */ + const auto reader = ncm::PackagedContentMetaReader(package_meta.Get(), package_meta.GetSize()); + + /* Get the key for the reader. */ + const auto key = reader.GetKey(); + + /* Check if we need to validate this content. 
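Only content metas referenced by the system update meta are validated; unrelated files in the package directory are skipped.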
*/ + bool need_validate = false; + size_t validation_index = 0; + for (size_t i = 0; i < num_content_metas; ++i) { + if (update_reader.GetContentMetaInfo(i)->ToKey() == key) { + need_validate = true; + validation_index = i; + break; + } + } + + /* If we don't need to validate, continue. */ + R_SUCCEED_IF(!need_validate); + + /* We're validating. */ + out_info->invalid_key = key; + + /* Validate all contents. */ + for (size_t i = 0; i < reader.GetContentCount(); ++i) { + const auto *content_info = reader.GetContentInfo(i); + const auto &content_id = content_info->GetId(); + const s64 content_size = content_info->info.GetSize(); + out_info->invalid_content_id = content_id; + + /* Get the content id string. */ + auto content_id_str = ncm::GetContentIdString(content_id); + + /* Open the file. */ + fs::FileHandle file; + { + char path[fs::EntryNameLengthMax]; + std::snprintf(path, sizeof(path), "%s%s%s", package_root, content_id_str.data, content_info->GetType() == ncm::ContentType::Meta ? ".cnmt.nca" : ".nca"); + if (R_FAILED(ValidateResult(fs::OpenFile(std::addressof(file), path, ams::fs::OpenMode_Read)))) { + *done = true; + return ResultSuccess(); + } + } + ON_SCOPE_EXIT { fs::CloseFile(file); }; + + /* Validate the file size is correct. */ + s64 file_size; + if (R_FAILED(ValidateResult(fs::GetFileSize(std::addressof(file_size), file)))) { + *done = true; + return ResultSuccess(); + } + if (file_size != content_size) { + *out_result = ncm::ResultInvalidContentHash(); + *done = true; + return ResultSuccess(); + } + + /* Read and hash the file in chunks. */ + crypto::Sha256Generator sha; + sha.Initialize(); + + s64 ofs = 0; + while (ofs < content_size) { + const size_t cur_size = std::min(static_cast(content_size - ofs), data_buffer_size); + if (R_FAILED(ValidateResult(fs::ReadFile(file, ofs, data_buffer, cur_size)))) { + *done = true; + return ResultSuccess(); + } + + sha.Update(data_buffer, cur_size); + + ofs += cur_size; + } + + /* Get the hash. */ + ncm::Digest calc_digest; + sha.GetHash(std::addressof(calc_digest), sizeof(calc_digest)); + + /* Validate the hash. */ + if (std::memcmp(std::addressof(calc_digest), std::addressof(content_info->digest), sizeof(ncm::Digest)) != 0) { + *out_result = ncm::ResultInvalidContentHash(); + *done = true; + return ResultSuccess(); + } + } + + /* Mark the relevant content as validated. */ + content_meta_valid[validation_index] = true; + *out_info = {}; + + return ResultSuccess(); + })); + + /* If we're otherwise going to succeed, ensure that every content was found. */ + if (R_SUCCEEDED(*out_result)) { + for (size_t i = 0; i < num_content_metas; ++i) { + if (!content_meta_valid[i]) { + const ncm::ContentMetaInfo *info = update_reader.GetContentMetaInfo(i); + + *out_info = { .invalid_key = info->ToKey(), }; + + if (IsExFatDriverSupported(*info)) { + *out_exfat_result = fs::ResultPathNotFound(); + /* Continue, in case there's a non-exFAT failure result. */ + } else { + *out_result = fs::ResultPathNotFound(); + break; + } + } + } + } + + return ResultSuccess(); + } + + Result FormatUserPackagePath(ncm::Path *out, const ncm::Path &user_path) { + /* Ensure that the user path is valid. 
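The caller-supplied path must be absolute; it is then interpreted relative to the root of the SD card.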
*/ + R_UNLESS(user_path.str[0] == '/', fs::ResultInvalidPath()); + + /* Print as @Sdcard:/ */ + std::snprintf(out->str, sizeof(out->str), "%s:%s/", ams::fs::impl::SdCardFileSystemMountName, user_path.str); + + /* Normalize, if the user provided an ending / */ + const size_t len = std::strlen(out->str); + if (out->str[len - 1] == '/' && out->str[len - 2] == '/') { + out->str[len - 1] = '\x00'; + } + + return ResultSuccess(); + } + + const char *GetFirmwareVariationSettingName(settings::system::PlatformRegion region) { + switch (region) { + case settings::system::PlatformRegion_Global: return "firmware_variation"; + case settings::system::PlatformRegion_China: return "t_firmware_variation"; + AMS_UNREACHABLE_DEFAULT_CASE(); + } + } + + ncm::FirmwareVariationId GetFirmwareVariationId() { + /* Get the firmware variation setting name. */ + const char * const setting_name = GetFirmwareVariationSettingName(settings::system::GetPlatformRegion()); + + /* Retrieve the firmware variation id. */ + ncm::FirmwareVariationId id = {}; + settings::fwdbg::GetSettingsItemValue(std::addressof(id.value), sizeof(u8), "ns.systemupdate", setting_name); + + return id; + } + + } + + Result SystemUpdateService::GetUpdateInformation(sf::Out out, const ncm::Path &path) { + /* Adjust the path. */ + ncm::Path package_root; + R_TRY(FormatUserPackagePath(std::addressof(package_root), path)); + + /* Create a new update information. */ + UpdateInformation update_info = {}; + + /* Parse the update. */ + { + /* Get the content info for the system update. */ + ncm::ContentInfo content_info; + R_TRY(GetSystemUpdateUpdateContentInfoFromPackage(std::addressof(content_info), package_root.str)); + + /* Read the content meta. */ + ncm::AutoBuffer content_meta_buffer; + R_TRY(ReadContentMetaPath(std::addressof(content_meta_buffer), package_root.str, content_info)); + + /* Create a reader. */ + const auto reader = ncm::PackagedContentMetaReader(content_meta_buffer.Get(), content_meta_buffer.GetSize()); + + /* Get the version from the header. */ + update_info.version = reader.GetHeader()->version; + + /* Iterate over infos to find the system update info. */ + for (size_t i = 0; i < reader.GetContentMetaCount(); ++i) { + const auto &meta_info = *reader.GetContentMetaInfo(i); + + switch (meta_info.type) { + case ncm::ContentMetaType::BootImagePackage: + /* Detect exFAT support. */ + update_info.exfat_supported |= IsExFatDriverSupported(meta_info); + break; + default: + break; + } + } + + /* Default to no firmware variations. */ + update_info.firmware_variation_count = 0; + + /* Parse firmware variations if relevant. */ + if (reader.GetExtendedDataSize() != 0) { + /* Get the actual firmware variation count. */ + ncm::SystemUpdateMetaExtendedDataReader extended_data_reader(reader.GetExtendedData(), reader.GetExtendedDataSize()); + update_info.firmware_variation_count = extended_data_reader.GetFirmwareVariationCount(); + + /* NOTE: Update this if Nintendo ever actually releases an update with this many variations? */ + R_UNLESS(update_info.firmware_variation_count <= FirmwareVariationCountMax, ncm::ResultInvalidFirmwareVariation()); + + for (size_t i = 0; i < update_info.firmware_variation_count; ++i) { + update_info.firmware_variation_ids[i] = *extended_data_reader.GetFirmwareVariationId(i); + } + } + } + + /* Set the parsed update info. 
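The returned structure carries the update's version, whether an exFAT boot image package is included, and any firmware variation ids parsed from the extended data.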
*/ + out.SetValue(update_info); + return ResultSuccess(); + } + + Result SystemUpdateService::ValidateUpdate(sf::Out out_validate_result, sf::Out out_validate_exfat_result, sf::Out out_validate_info, const ncm::Path &path) { + /* Adjust the path. */ + ncm::Path package_root; + R_TRY(FormatUserPackagePath(std::addressof(package_root), path)); + + /* Parse the update. */ + { + /* Get the content info for the system update. */ + ncm::ContentInfo content_info; + R_TRY(GetSystemUpdateUpdateContentInfoFromPackage(std::addressof(content_info), package_root.str)); + + /* Read the content meta. */ + ncm::AutoBuffer content_meta_buffer; + R_TRY(ReadContentMetaPath(std::addressof(content_meta_buffer), package_root.str, content_info)); + + /* Create a reader. */ + const auto reader = ncm::PackagedContentMetaReader(content_meta_buffer.Get(), content_meta_buffer.GetSize()); + + /* Validate the update. */ + R_TRY(ValidateSystemUpdate(out_validate_result.GetPointer(), out_validate_exfat_result.GetPointer(), out_validate_info.GetPointer(), reader, package_root.str)); + } + + return ResultSuccess(); + }; + + Result SystemUpdateService::SetupUpdate(sf::CopyHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat) { + return this->SetupUpdateImpl(transfer_memory.GetValue(), transfer_memory_size, path, exfat, GetFirmwareVariationId()); + } + + Result SystemUpdateService::SetupUpdateWithVariation(sf::CopyHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat, ncm::FirmwareVariationId firmware_variation_id) { + return this->SetupUpdateImpl(transfer_memory.GetValue(), transfer_memory_size, path, exfat, firmware_variation_id); + } + + Result SystemUpdateService::RequestPrepareUpdate(sf::OutCopyHandle out_event_handle, sf::Out> out_async) { + /* Ensure the update is setup but not prepared. */ + R_UNLESS(this->setup_update, ns::ResultCardUpdateNotSetup()); + R_UNLESS(!this->requested_update, ns::ResultPrepareCardUpdateAlreadyRequested()); + + /* Create the async result. */ + auto async_result = sf::MakeShared(std::addressof(*this->update_task)); + R_UNLESS(async_result != nullptr, ns::ResultOutOfMaxRunningTask()); + + /* Run the task. */ + R_TRY(async_result->GetImpl().Run()); + + /* We prepared the task! */ + this->requested_update = true; + out_event_handle.SetValue(async_result->GetImpl().GetEvent().GetReadableHandle()); + out_async.SetValue(std::move(async_result)); + + return ResultSuccess(); + } + + Result SystemUpdateService::GetPrepareUpdateProgress(sf::Out out) { + /* Ensure the update is setup. */ + R_UNLESS(this->setup_update, ns::ResultCardUpdateNotSetup()); + + /* Get the progress. */ + auto install_progress = this->update_task->GetProgress(); + out.SetValue({ .current_size = install_progress.installed_size, .total_size = install_progress.total_size }); + return ResultSuccess(); + } + + Result SystemUpdateService::HasPreparedUpdate(sf::Out out) { + /* Ensure the update is setup. */ + R_UNLESS(this->setup_update, ns::ResultCardUpdateNotSetup()); + + out.SetValue(this->update_task->GetProgress().state == ncm::InstallProgressState::Downloaded); + return ResultSuccess(); + } + + Result SystemUpdateService::ApplyPreparedUpdate() { + /* Ensure the update is setup. */ + R_UNLESS(this->setup_update, ns::ResultCardUpdateNotSetup()); + + /* Ensure the update is prepared. */ + R_UNLESS(this->update_task->GetProgress().state == ncm::InstallProgressState::Downloaded, ns::ResultCardUpdateNotPrepared()); + + /* Apply the task. 
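The prepared (fully downloaded) install task is handed to the apply manager, which performs the actual commit.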
*/ + R_TRY(this->apply_manager.ApplyPackageTask(std::addressof(*this->update_task))); + + return ResultSuccess(); + } + + Result SystemUpdateService::SetupUpdateImpl(os::ManagedHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat, ncm::FirmwareVariationId firmware_variation_id) { + /* Ensure we don't already have an update set up. */ + R_UNLESS(!this->setup_update, ns::ResultCardUpdateAlreadySetup()); + + /* Destroy any existing update tasks. */ + nim::SystemUpdateTaskId id; + auto count = nim::ListSystemUpdateTask(std::addressof(id), 1); + if (count > 0) { + R_TRY(nim::DestroySystemUpdateTask(id)); + } + + /* Initialize the update task. */ + R_TRY(InitializeUpdateTask(transfer_memory, transfer_memory_size, path, exfat, firmware_variation_id)); + + /* The update is now set up. */ + this->setup_update = true; + return ResultSuccess(); + } + + Result SystemUpdateService::InitializeUpdateTask(os::ManagedHandle &transfer_memory_handle, u64 transfer_memory_size, const ncm::Path &path, bool exfat, ncm::FirmwareVariationId firmware_variation_id) { + /* Map the transfer memory. */ + const size_t tmem_buffer_size = static_cast(transfer_memory_size); + this->update_transfer_memory.emplace(tmem_buffer_size, transfer_memory_handle.Get(), true); + + void *tmem_buffer; + R_TRY(this->update_transfer_memory->Map(std::addressof(tmem_buffer), os::MemoryPermission_None)); + auto tmem_guard = SCOPE_GUARD { + this->update_transfer_memory->Unmap(); + this->update_transfer_memory = std::nullopt; + }; + + /* Now that the memory is mapped, the input handle is managed and can be released. */ + transfer_memory_handle.Detach(); + + /* Adjust the package root. */ + ncm::Path package_root; + R_TRY(FormatUserPackagePath(std::addressof(package_root), path)); + + /* Ensure that we can create an update context. */ + R_TRY(fs::EnsureDirectoryRecursively("@Sdcard:/atmosphere/update/")); + const char *context_path = "@Sdcard:/atmosphere/update/cup.ctx"; + + /* Create and initialize the update task. */ + this->update_task.emplace(); + R_TRY(this->update_task->Initialize(package_root.str, context_path, tmem_buffer, tmem_buffer_size, exfat, firmware_variation_id)); + + /* We successfully setup the update. */ + tmem_guard.Cancel(); + + return ResultSuccess(); + } + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_service.hpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_service.hpp new file mode 100644 index 000000000..f0d05b12c --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_service.hpp @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include "sysupdater_apply_manager.hpp" + +namespace ams::mitm::sysupdater { + + constexpr inline size_t FirmwareVariationCountMax = 16; + + struct UpdateInformation { + u32 version; + bool exfat_supported; + u32 firmware_variation_count; + ncm::FirmwareVariationId firmware_variation_ids[FirmwareVariationCountMax]; + }; + + struct UpdateValidationInfo { + ncm::ContentMetaKey invalid_key; + ncm::ContentId invalid_content_id; + }; + + struct SystemUpdateProgress { + s64 current_size; + s64 total_size; + }; + + namespace impl { + + #define AMS_SYSUPDATER_SYSTEM_UPDATE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, Result, GetUpdateInformation, (sf::Out out, const ncm::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 1, Result, ValidateUpdate, (sf::Out out_validate_result, sf::Out out_validate_exfat_result, sf::Out out_validate_info, const ncm::Path &path)) \ + AMS_SF_METHOD_INFO(C, H, 2, Result, SetupUpdate, (sf::CopyHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat)) \ + AMS_SF_METHOD_INFO(C, H, 3, Result, SetupUpdateWithVariation, (sf::CopyHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat, ncm::FirmwareVariationId firmware_variation_id)) \ + AMS_SF_METHOD_INFO(C, H, 4, Result, RequestPrepareUpdate, (sf::OutCopyHandle out_event_handle, sf::Out> out_async)) \ + AMS_SF_METHOD_INFO(C, H, 5, Result, GetPrepareUpdateProgress, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 6, Result, HasPreparedUpdate, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 7, Result, ApplyPreparedUpdate, ()) + + AMS_SF_DEFINE_INTERFACE(ISystemUpdateInterface, AMS_SYSUPDATER_SYSTEM_UPDATE_INTERFACE_INFO) + + + + } + + class SystemUpdateService final { + private: + SystemUpdateApplyManager apply_manager; + std::optional update_task; + std::optional update_transfer_memory; + bool setup_update; + bool requested_update; + public: + constexpr SystemUpdateService() : apply_manager(), update_task(), update_transfer_memory(), setup_update(false), requested_update(false) { /* ... 
*/ } + private: + Result SetupUpdateImpl(os::ManagedHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat, ncm::FirmwareVariationId firmware_variation_id); + Result InitializeUpdateTask(os::ManagedHandle &transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat, ncm::FirmwareVariationId firmware_variation_id); + public: + Result GetUpdateInformation(sf::Out out, const ncm::Path &path); + Result ValidateUpdate(sf::Out out_validate_result, sf::Out out_validate_exfat_result, sf::Out out_validate_info, const ncm::Path &path); + Result SetupUpdate(sf::CopyHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat); + Result SetupUpdateWithVariation(sf::CopyHandle transfer_memory, u64 transfer_memory_size, const ncm::Path &path, bool exfat, ncm::FirmwareVariationId firmware_variation_id); + Result RequestPrepareUpdate(sf::OutCopyHandle out_event_handle, sf::Out> out_async); + Result GetPrepareUpdateProgress(sf::Out out); + Result HasPreparedUpdate(sf::Out out); + Result ApplyPreparedUpdate(); + }; + static_assert(impl::IsISystemUpdateInterface); + +} diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_thread_allocator.cpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_thread_allocator.cpp new file mode 100644 index 000000000..f01997683 --- /dev/null +++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_thread_allocator.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */
+#include <stratosphere.hpp>
+#include "sysupdater_thread_allocator.hpp"
+
+namespace ams::mitm::sysupdater {
+
+    Result ThreadAllocator::Allocate(ThreadInfo *out) {
+        std::scoped_lock lk(this->mutex);
+
+        for (int i = 0; i < this->thread_count; ++i) {
+            const u64 mask = (static_cast<u64>(1) << i);
+            if ((this->bitmap & mask) == 0) {
+                *out = {
+                    .thread     = this->thread_list + i,
+                    .priority   = this->thread_priority,
+                    .stack      = this->stack_heap + (this->stack_size * i),
+                    .stack_size = this->stack_size,
+                };
+                this->bitmap |= mask;
+                return ResultSuccess();
+            }
+        }
+
+        return ns::ResultOutOfMaxRunningTask();
+    }
+
+    void ThreadAllocator::Free(const ThreadInfo &info) {
+        std::scoped_lock lk(this->mutex);
+
+        for (int i = 0; i < this->thread_count; ++i) {
+            if (info.thread == std::addressof(this->thread_list[i])) {
+                const u64 mask = (static_cast<u64>(1) << i);
+                this->bitmap &= ~mask;
+                return;
+            }
+        }
+
+        AMS_ABORT("Invalid thread passed to ThreadAllocator::Free");
+    }
+
+}
diff --git a/stratosphere/ams_mitm/source/sysupdater/sysupdater_thread_allocator.hpp b/stratosphere/ams_mitm/source/sysupdater/sysupdater_thread_allocator.hpp
new file mode 100644
index 000000000..9c57570c6
--- /dev/null
+++ b/stratosphere/ams_mitm/source/sysupdater/sysupdater_thread_allocator.hpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2018-2020 Atmosphère-NX
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <stratosphere.hpp>
+
+namespace ams::mitm::sysupdater {
+
+    struct ThreadInfo {
+        os::ThreadType *thread;
+        int priority;
+        void *stack;
+        size_t stack_size;
+    };
+
+    /* NOTE: Nintendo uses a util::BitArray, but this seems excessive.
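+       A single u64 bitmap suffices here, since the constructor asserts that the thread count never exceeds BITSIZEOF(u64).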
*/ + class ThreadAllocator { + private: + os::ThreadType *thread_list; + const int thread_priority; + const int thread_count; + u8 *stack_heap; + const size_t stack_heap_size; + const size_t stack_size; + u64 bitmap; + os::SdkMutex mutex; + public: + constexpr ThreadAllocator(os::ThreadType *thread_list, int count, int priority, u8 *stack_heap, size_t stack_heap_size, size_t stack_size) + : thread_list(thread_list), thread_priority(priority), thread_count(count), stack_heap(stack_heap), stack_heap_size(stack_heap_size), stack_size(stack_size), bitmap() + { + AMS_ASSERT(count <= static_cast(stack_heap_size / stack_size)); + AMS_ASSERT(count <= static_cast(BITSIZEOF(this->bitmap))); + } + + Result Allocate(ThreadInfo *out); + void Free(const ThreadInfo &info); + }; + +} diff --git a/stratosphere/creport/source/creport_crash_report.cpp b/stratosphere/creport/source/creport_crash_report.cpp index 173d8c88f..01a468385 100644 --- a/stratosphere/creport/source/creport_crash_report.cpp +++ b/stratosphere/creport/source/creport_crash_report.cpp @@ -163,11 +163,11 @@ namespace ams::creport { svc::DebugEventInfo d; while (R_SUCCEEDED(svcGetDebugEvent(reinterpret_cast(&d), this->debug_handle))) { switch (d.type) { - case svc::DebugEvent_AttachProcess: - this->HandleDebugEventInfoAttachProcess(d); + case svc::DebugEvent_CreateProcess: + this->HandleDebugEventInfoCreateProcess(d); break; - case svc::DebugEvent_AttachThread: - this->HandleDebugEventInfoAttachThread(d); + case svc::DebugEvent_CreateThread: + this->HandleDebugEventInfoCreateThread(d); break; case svc::DebugEvent_Exception: this->HandleDebugEventInfoException(d); @@ -182,8 +182,8 @@ namespace ams::creport { this->crashed_thread.ReadFromProcess(this->debug_handle, this->thread_tls_map, this->crashed_thread_id, this->Is64Bit()); } - void CrashReport::HandleDebugEventInfoAttachProcess(const svc::DebugEventInfo &d) { - this->process_info = d.info.attach_process; + void CrashReport::HandleDebugEventInfoCreateProcess(const svc::DebugEventInfo &d) { + this->process_info = d.info.create_process; /* On 5.0.0+, we want to parse out a dying message from application crashes. */ if (hos::GetVersion() < hos::Version_5_0_0 || !IsApplication()) { @@ -217,9 +217,9 @@ namespace ams::creport { this->dying_message_size = userdata_size; } - void CrashReport::HandleDebugEventInfoAttachThread(const svc::DebugEventInfo &d) { + void CrashReport::HandleDebugEventInfoCreateThread(const svc::DebugEventInfo &d) { /* Save info on the thread's TLS address for later. */ - this->thread_tls_map[d.info.attach_thread.thread_id] = d.info.attach_thread.tls_address; + this->thread_tls_map[d.info.create_thread.thread_id] = d.info.create_thread.tls_address; } void CrashReport::HandleDebugEventInfoException(const svc::DebugEventInfo &d) { diff --git a/stratosphere/creport/source/creport_crash_report.hpp b/stratosphere/creport/source/creport_crash_report.hpp index 07174c33b..15858c02b 100644 --- a/stratosphere/creport/source/creport_crash_report.hpp +++ b/stratosphere/creport/source/creport_crash_report.hpp @@ -33,7 +33,7 @@ namespace ams::creport { std::map thread_tls_map; /* Attach process info. 
*/ - svc::DebugInfoAttachProcess process_info = {}; + svc::DebugInfoCreateProcess process_info = {}; u64 dying_message_address = 0; u64 dying_message_size = 0; u8 *dying_message = nullptr; @@ -95,8 +95,8 @@ namespace ams::creport { private: void ProcessExceptions(); void ProcessDyingMessage(); - void HandleDebugEventInfoAttachProcess(const svc::DebugEventInfo &d); - void HandleDebugEventInfoAttachThread(const svc::DebugEventInfo &d); + void HandleDebugEventInfoCreateProcess(const svc::DebugEventInfo &d); + void HandleDebugEventInfoCreateThread(const svc::DebugEventInfo &d); void HandleDebugEventInfoException(const svc::DebugEventInfo &d); void SaveToFile(ScopedFile &file); diff --git a/stratosphere/dmnt/source/cheat/dmnt_cheat_service.hpp b/stratosphere/dmnt/source/cheat/dmnt_cheat_service.hpp index 35c7d2c7c..9a507fb3a 100644 --- a/stratosphere/dmnt/source/cheat/dmnt_cheat_service.hpp +++ b/stratosphere/dmnt/source/cheat/dmnt_cheat_service.hpp @@ -18,43 +18,42 @@ namespace ams::dmnt::cheat { - class CheatService final : public sf::IServiceObject { - private: - enum class CommandId { - /* Meta */ - HasCheatProcess = 65000, - GetCheatProcessEvent = 65001, - GetCheatProcessMetadata = 65002, - ForceOpenCheatProcess = 65003, - PauseCheatProcess = 65004, - ResumeCheatProcess = 65005, + /* TODO: In libstratosphere, eventually? */ + namespace impl { - /* Interact with Memory */ - GetCheatProcessMappingCount = 65100, - GetCheatProcessMappings = 65101, - ReadCheatProcessMemory = 65102, - WriteCheatProcessMemory = 65103, - QueryCheatProcessMemory = 65104, + #define AMS_DMNT_I_CHEAT_INTERFACE_INTERFACE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 65000, void, HasCheatProcess, (sf::Out out)) \ + AMS_SF_METHOD_INFO(C, H, 65001, void, GetCheatProcessEvent, (sf::OutCopyHandle out_event)) \ + AMS_SF_METHOD_INFO(C, H, 65002, Result, GetCheatProcessMetadata, (sf::Out out_metadata)) \ + AMS_SF_METHOD_INFO(C, H, 65003, Result, ForceOpenCheatProcess, ()) \ + AMS_SF_METHOD_INFO(C, H, 65004, Result, PauseCheatProcess, ()) \ + AMS_SF_METHOD_INFO(C, H, 65005, Result, ResumeCheatProcess, ()) \ + AMS_SF_METHOD_INFO(C, H, 65100, Result, GetCheatProcessMappingCount, (sf::Out out_count)) \ + AMS_SF_METHOD_INFO(C, H, 65101, Result, GetCheatProcessMappings, (const sf::OutArray &mappings, sf::Out out_count, u64 offset)) \ + AMS_SF_METHOD_INFO(C, H, 65102, Result, ReadCheatProcessMemory, (const sf::OutBuffer &buffer, u64 address, u64 out_size)) \ + AMS_SF_METHOD_INFO(C, H, 65103, Result, WriteCheatProcessMemory, (const sf::InBuffer &buffer, u64 address, u64 in_size)) \ + AMS_SF_METHOD_INFO(C, H, 65104, Result, QueryCheatProcessMemory, (sf::Out mapping, u64 address)) \ + AMS_SF_METHOD_INFO(C, H, 65200, Result, GetCheatCount, (sf::Out out_count)) \ + AMS_SF_METHOD_INFO(C, H, 65201, Result, GetCheats, (const sf::OutArray &cheats, sf::Out out_count, u64 offset)) \ + AMS_SF_METHOD_INFO(C, H, 65202, Result, GetCheatById, (sf::Out cheat, u32 cheat_id)) \ + AMS_SF_METHOD_INFO(C, H, 65203, Result, ToggleCheat, (u32 cheat_id)) \ + AMS_SF_METHOD_INFO(C, H, 65204, Result, AddCheat, (const CheatDefinition &cheat, sf::Out out_cheat_id, bool enabled)) \ + AMS_SF_METHOD_INFO(C, H, 65205, Result, RemoveCheat, (u32 cheat_id)) \ + AMS_SF_METHOD_INFO(C, H, 65206, Result, ReadStaticRegister, (sf::Out out, u8 which)) \ + AMS_SF_METHOD_INFO(C, H, 65207, Result, WriteStaticRegister, (u8 which, u64 value)) \ + AMS_SF_METHOD_INFO(C, H, 65208, Result, ResetStaticRegisters, ()) \ + AMS_SF_METHOD_INFO(C, H, 65300, Result, GetFrozenAddressCount, (sf::Out 
out_count)) \ + AMS_SF_METHOD_INFO(C, H, 65301, Result, GetFrozenAddresses, (const sf::OutArray &addresses, sf::Out out_count, u64 offset)) \ + AMS_SF_METHOD_INFO(C, H, 65302, Result, GetFrozenAddress, (sf::Out entry, u64 address)) \ + AMS_SF_METHOD_INFO(C, H, 65303, Result, EnableFrozenAddress, (sf::Out out_value, u64 address, u64 width)) \ + AMS_SF_METHOD_INFO(C, H, 65304, Result, DisableFrozenAddress, (u64 address)) - /* Interact with Cheats */ - GetCheatCount = 65200, - GetCheats = 65201, - GetCheatById = 65202, - ToggleCheat = 65203, - AddCheat = 65204, - RemoveCheat = 65205, - ReadStaticRegister = 65206, - WriteStaticRegister = 65207, - ResetStaticRegisters = 65208, + AMS_SF_DEFINE_INTERFACE(ICheatInterface, AMS_DMNT_I_CHEAT_INTERFACE_INTERFACE_INFO) - /* Interact with Frozen Addresses */ - GetFrozenAddressCount = 65300, - GetFrozenAddresses = 65301, - GetFrozenAddress = 65302, - EnableFrozenAddress = 65303, - DisableFrozenAddress = 65304, - }; - private: + } + + class CheatService final { + public: void HasCheatProcess(sf::Out out); void GetCheatProcessEvent(sf::OutCopyHandle out_event); Result GetCheatProcessMetadata(sf::Out out_metadata); @@ -83,38 +82,7 @@ namespace ams::dmnt::cheat { Result GetFrozenAddress(sf::Out entry, u64 address); Result EnableFrozenAddress(sf::Out out_value, u64 address, u64 width); Result DisableFrozenAddress(u64 address); - - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(HasCheatProcess), - MAKE_SERVICE_COMMAND_META(GetCheatProcessEvent), - MAKE_SERVICE_COMMAND_META(GetCheatProcessMetadata), - MAKE_SERVICE_COMMAND_META(ForceOpenCheatProcess), - MAKE_SERVICE_COMMAND_META(PauseCheatProcess), - MAKE_SERVICE_COMMAND_META(ResumeCheatProcess), - - MAKE_SERVICE_COMMAND_META(GetCheatProcessMappingCount), - MAKE_SERVICE_COMMAND_META(GetCheatProcessMappings), - MAKE_SERVICE_COMMAND_META(ReadCheatProcessMemory), - MAKE_SERVICE_COMMAND_META(WriteCheatProcessMemory), - MAKE_SERVICE_COMMAND_META(QueryCheatProcessMemory), - - MAKE_SERVICE_COMMAND_META(GetCheatCount), - MAKE_SERVICE_COMMAND_META(GetCheats), - MAKE_SERVICE_COMMAND_META(GetCheatById), - MAKE_SERVICE_COMMAND_META(ToggleCheat), - MAKE_SERVICE_COMMAND_META(AddCheat), - MAKE_SERVICE_COMMAND_META(RemoveCheat), - MAKE_SERVICE_COMMAND_META(ReadStaticRegister), - MAKE_SERVICE_COMMAND_META(WriteStaticRegister), - MAKE_SERVICE_COMMAND_META(ResetStaticRegisters), - - MAKE_SERVICE_COMMAND_META(GetFrozenAddressCount), - MAKE_SERVICE_COMMAND_META(GetFrozenAddresses), - MAKE_SERVICE_COMMAND_META(GetFrozenAddress), - MAKE_SERVICE_COMMAND_META(EnableFrozenAddress), - MAKE_SERVICE_COMMAND_META(DisableFrozenAddress), - }; }; + static_assert(impl::IsICheatInterface); } diff --git a/stratosphere/dmnt/source/cheat/impl/dmnt_cheat_debug_events_manager.cpp b/stratosphere/dmnt/source/cheat/impl/dmnt_cheat_debug_events_manager.cpp index 6a4ba7a39..5da5d609d 100644 --- a/stratosphere/dmnt/source/cheat/impl/dmnt_cheat_debug_events_manager.cpp +++ b/stratosphere/dmnt/source/cheat/impl/dmnt_cheat_debug_events_manager.cpp @@ -56,11 +56,11 @@ namespace ams::dmnt::cheat::impl { size_t target_core = NumCores - 1; /* Retrieve correct core for new thread event. 
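The new thread's current core is queried via svcGetDebugThreadParam so the event can be handled by the appropriate per-core handler.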
*/ - if (dbg_event.type == svc::DebugEvent_AttachThread) { + if (dbg_event.type == svc::DebugEvent_CreateThread) { u64 out64 = 0; u32 out32 = 0; - R_TRY_CATCH(svcGetDebugThreadParam(&out64, &out32, debug_handle, dbg_event.info.attach_thread.thread_id, DebugThreadParam_CurrentCore)) { + R_TRY_CATCH(svcGetDebugThreadParam(&out64, &out32, debug_handle, dbg_event.info.create_thread.thread_id, DebugThreadParam_CurrentCore)) { R_CATCH_RETHROW(svc::ResultProcessTerminated) } R_END_TRY_CATCH_WITH_ABORT_UNLESS; @@ -131,7 +131,7 @@ namespace ams::dmnt::cheat::impl { svc::DebugEventInfo d; size_t target_core = NumCores - 1; while (R_SUCCEEDED(svc::GetDebugEvent(std::addressof(d), cheat_dbg_hnd))) { - if (d.type == svc::DebugEvent_AttachThread) { + if (d.type == svc::DebugEvent_CreateThread) { R_TRY(GetTargetCore(std::addressof(target_core), d, cheat_dbg_hnd)); } } diff --git a/stratosphere/dmnt/source/dmnt_main.cpp b/stratosphere/dmnt/source/dmnt_main.cpp index 3839d4b9a..ff99db916 100644 --- a/stratosphere/dmnt/source/dmnt_main.cpp +++ b/stratosphere/dmnt/source/dmnt_main.cpp @@ -140,7 +140,7 @@ int main(int argc, char **argv) /* Create services. */ /* TODO: Implement rest of dmnt:- in ams.tma development branch. */ /* R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); */ - R_ABORT_UNLESS((g_server_manager.RegisterServer(CheatServiceName, CheatMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(CheatServiceName, CheatMaxSessions))); /* Loop forever, servicing our services. */ /* Nintendo loops four threads processing on the manager -- we'll loop an extra fifth for our cheat service. */ diff --git a/stratosphere/dmnt/source/dmnt_service.hpp b/stratosphere/dmnt/source/dmnt_service.hpp index 80ba6de55..8372f6241 100644 --- a/stratosphere/dmnt/source/dmnt_service.hpp +++ b/stratosphere/dmnt/source/dmnt_service.hpp @@ -57,7 +57,8 @@ namespace ams::dmnt { static_assert(util::is_pod::value && sizeof(TargetIOFileHandle) == sizeof(u64), "TargetIOFileHandle"); - class DebugMonitorService final : public sf::IServiceObject { + /* TODO: Convert to new sf format in the future. 
*/ + class DebugMonitorService final { private: enum class CommandId { BreakDebugProcess = 0, @@ -131,61 +132,6 @@ namespace ams::dmnt { Result TargetIO_FileSetSize(const sf::InBuffer &input, s64 size); Result TargetIO_FileDelete(const sf::InBuffer &path); Result TargetIO_FileMove(const sf::InBuffer &src_path, const sf::InBuffer &dst_path); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(BreakDebugProcess), - MAKE_SERVICE_COMMAND_META(TerminateDebugProcess), - MAKE_SERVICE_COMMAND_META(CloseHandle), - // MAKE_SERVICE_COMMAND_META(LoadImage), - MAKE_SERVICE_COMMAND_META(GetProcessId), - MAKE_SERVICE_COMMAND_META(GetProcessHandle), - MAKE_SERVICE_COMMAND_META(WaitSynchronization), - //MAKE_SERVICE_COMMAND_META(GetDebugEvent), - // MAKE_SERVICE_COMMAND_META(GetProcessModuleInfo), - // MAKE_SERVICE_COMMAND_META(GetProcessList), - // MAKE_SERVICE_COMMAND_META(GetThreadList), - // MAKE_SERVICE_COMMAND_META(GetDebugThreadContext), - // MAKE_SERVICE_COMMAND_META(ContinueDebugEvent), - // MAKE_SERVICE_COMMAND_META(ReadDebugProcessMemory), - // MAKE_SERVICE_COMMAND_META(WriteDebugProcessMemory), - // MAKE_SERVICE_COMMAND_META(SetDebugThreadContext), - // MAKE_SERVICE_COMMAND_META(GetDebugThreadParam), - // MAKE_SERVICE_COMMAND_META(InitializeThreadInfo), - // MAKE_SERVICE_COMMAND_META(SetHardwareBreakPoint), - // MAKE_SERVICE_COMMAND_META(QueryDebugProcessMemory), - // MAKE_SERVICE_COMMAND_META(GetProcessMemoryDetails), - // MAKE_SERVICE_COMMAND_META(AttachByProgramId), - // MAKE_SERVICE_COMMAND_META(AttachOnLaunch), - // MAKE_SERVICE_COMMAND_META(GetDebugMonitorProcessId), - // MAKE_SERVICE_COMMAND_META(GetJitDebugProcessList), - // MAKE_SERVICE_COMMAND_META(CreateCoreDump), - // MAKE_SERVICE_COMMAND_META(GetAllDebugThreadInfo), - MAKE_SERVICE_COMMAND_META(TargetIO_FileOpen), - MAKE_SERVICE_COMMAND_META(TargetIO_FileClose), - MAKE_SERVICE_COMMAND_META(TargetIO_FileRead), - MAKE_SERVICE_COMMAND_META(TargetIO_FileWrite), - MAKE_SERVICE_COMMAND_META(TargetIO_FileSetAttributes), - MAKE_SERVICE_COMMAND_META(TargetIO_FileGetInformation), - MAKE_SERVICE_COMMAND_META(TargetIO_FileSetTime), - MAKE_SERVICE_COMMAND_META(TargetIO_FileSetSize), - MAKE_SERVICE_COMMAND_META(TargetIO_FileDelete), - MAKE_SERVICE_COMMAND_META(TargetIO_FileMove), - // MAKE_SERVICE_COMMAND_META(TargetIO_DirectoryCreate), - // MAKE_SERVICE_COMMAND_META(TargetIO_DirectoryDelete), - // MAKE_SERVICE_COMMAND_META(TargetIO_DirectoryRename), - // MAKE_SERVICE_COMMAND_META(TargetIO_DirectoryGetCount), - // MAKE_SERVICE_COMMAND_META(TargetIO_DirectoryOpen), - // MAKE_SERVICE_COMMAND_META(TargetIO_DirectoryGetNext), - // MAKE_SERVICE_COMMAND_META(TargetIO_DirectoryClose), - // MAKE_SERVICE_COMMAND_META(TargetIO_GetFreeSpace), - // MAKE_SERVICE_COMMAND_META(TargetIO_GetVolumeInformation), - // MAKE_SERVICE_COMMAND_META(InitiateCoreDump), - // MAKE_SERVICE_COMMAND_META(ContinueCoreDump), - // MAKE_SERVICE_COMMAND_META(AddTTYToCoreDump), - // MAKE_SERVICE_COMMAND_META(AddImageToCoreDump), - // MAKE_SERVICE_COMMAND_META(CloseCoreDump), - // MAKE_SERVICE_COMMAND_META(CancelAttach), - }; }; } diff --git a/stratosphere/erpt/source/erpt_main.cpp b/stratosphere/erpt/source/erpt_main.cpp index f777a23ab..52823bb36 100644 --- a/stratosphere/erpt/source/erpt_main.cpp +++ b/stratosphere/erpt/source/erpt_main.cpp @@ -138,10 +138,7 @@ int main(int argc, char **argv) settings::system::GetSerialNumber(std::addressof(serial_number)); char os_private[0x60]; -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wformat-truncation" 
const auto os_priv_len = std::snprintf(os_private, sizeof(os_private), "%s (%.8s)", firmware_version.display_name, firmware_version.revision); -#pragma GCC diagnostic pop AMS_ASSERT(static_cast(os_priv_len) < sizeof(os_private)); R_ABORT_UNLESS(erpt::srv::SetSerialNumberAndOsVersion(serial_number.str, diff --git a/stratosphere/fatal/source/fatal_debug.cpp b/stratosphere/fatal/source/fatal_debug.cpp index 7f4188c55..6a45f589a 100644 --- a/stratosphere/fatal/source/fatal_debug.cpp +++ b/stratosphere/fatal/source/fatal_debug.cpp @@ -178,17 +178,17 @@ namespace ams::fatal::srv { /* First things first, check if process is 64 bits, and get list of thread infos. */ std::unordered_map thread_id_to_tls; { - bool got_attach_process = false; + bool got_create_process = false; svc::DebugEventInfo d; while (R_SUCCEEDED(svcGetDebugEvent(reinterpret_cast(&d), debug_handle.Get()))) { switch (d.type) { - case svc::DebugEvent_AttachProcess: - ctx->cpu_ctx.architecture = (d.info.attach_process.flags & 1) ? CpuContext::Architecture_Aarch64 : CpuContext::Architecture_Aarch32; - std::memcpy(ctx->proc_name, d.info.attach_process.name, sizeof(d.info.attach_process.name)); - got_attach_process = true; + case svc::DebugEvent_CreateProcess: + ctx->cpu_ctx.architecture = (d.info.create_process.flags & 1) ? CpuContext::Architecture_Aarch64 : CpuContext::Architecture_Aarch32; + std::memcpy(ctx->proc_name, d.info.create_process.name, sizeof(d.info.create_process.name)); + got_create_process = true; break; - case svc::DebugEvent_AttachThread: - thread_id_to_tls[d.info.attach_thread.thread_id] = d.info.attach_thread.tls_address; + case svc::DebugEvent_CreateThread: + thread_id_to_tls[d.info.create_thread.thread_id] = d.info.create_thread.tls_address; break; case svc::DebugEvent_Exception: case svc::DebugEvent_ExitProcess: @@ -197,7 +197,7 @@ namespace ams::fatal::srv { } } - if (!got_attach_process) { + if (!got_create_process) { return; } } diff --git a/stratosphere/fatal/source/fatal_main.cpp b/stratosphere/fatal/source/fatal_main.cpp index f1708ba51..3b576347b 100644 --- a/stratosphere/fatal/source/fatal_main.cpp +++ b/stratosphere/fatal/source/fatal_main.cpp @@ -142,6 +142,9 @@ namespace { int main(int argc, char **argv) { + /* Disable auto-abort in fs operations. */ + fs::SetEnabledAutoAbort(false); + /* Set thread name. */ os::SetThreadNamePointer(os::GetCurrentThread(), AMS_GET_SYSTEM_THREAD_NAME(fatal, Main)); AMS_ASSERT(os::GetThreadPriority(os::GetCurrentThread()) == AMS_GET_SYSTEM_THREAD_PRIORITY(fatal, Main)); @@ -153,11 +156,10 @@ int main(int argc, char **argv) fatal::srv::CheckRepairStatus(); /* Create services. */ - R_ABORT_UNLESS((g_server_manager.RegisterServer(PrivateServiceName, PrivateMaxSessions))); - R_ABORT_UNLESS((g_server_manager.RegisterServer(UserServiceName, UserMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(PrivateServiceName, PrivateMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(UserServiceName, UserMaxSessions))); /* Add dirty event holder. 
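The waitable holder returned by GetFatalDirtyWaitableHolder() is attached to the server manager below, so the same IPC loop also wakes up when the fatal dirty event is signaled.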
*/ - /* TODO: s_server_manager.AddWaitable(ams::fatal::srv::GetFatalDirtyEvent()); */ auto *dirty_event_holder = ams::fatal::srv::GetFatalDirtyWaitableHolder(); g_server_manager.AddUserWaitableHolder(dirty_event_holder); diff --git a/stratosphere/fatal/source/fatal_service.cpp b/stratosphere/fatal/source/fatal_service.cpp index 0e86883b6..5c0744c35 100644 --- a/stratosphere/fatal/source/fatal_service.cpp +++ b/stratosphere/fatal/source/fatal_service.cpp @@ -133,15 +133,15 @@ namespace ams::fatal::srv { return g_context.ThrowFatalWithPolicy(result, os::GetCurrentProcessId(), FatalPolicy_ErrorScreen); } - Result UserService::ThrowFatal(Result result, const sf::ClientProcessId &client_pid) { + Result Service::ThrowFatal(Result result, const sf::ClientProcessId &client_pid) { return g_context.ThrowFatal(result, client_pid.GetValue()); } - Result UserService::ThrowFatalWithPolicy(Result result, const sf::ClientProcessId &client_pid, FatalPolicy policy) { + Result Service::ThrowFatalWithPolicy(Result result, const sf::ClientProcessId &client_pid, FatalPolicy policy) { return g_context.ThrowFatalWithPolicy(result, client_pid.GetValue(), policy); } - Result UserService::ThrowFatalWithCpuContext(Result result, const sf::ClientProcessId &client_pid, FatalPolicy policy, const CpuContext &cpu_ctx) { + Result Service::ThrowFatalWithCpuContext(Result result, const sf::ClientProcessId &client_pid, FatalPolicy policy, const CpuContext &cpu_ctx) { return g_context.ThrowFatalWithCpuContext(result, client_pid.GetValue(), policy, cpu_ctx); } diff --git a/stratosphere/fatal/source/fatal_service.hpp b/stratosphere/fatal/source/fatal_service.hpp index 04c707e0a..ca20c5e9f 100644 --- a/stratosphere/fatal/source/fatal_service.hpp +++ b/stratosphere/fatal/source/fatal_service.hpp @@ -18,39 +18,19 @@ namespace ams::fatal::srv { - class UserService final : public sf::IServiceObject { - private: - enum class CommandId { - ThrowFatal = 0, - ThrowFatalWithPolicy = 1, - ThrowFatalWithCpuContext = 2, - }; - private: - /* Actual commands. */ + class Service final { + public: Result ThrowFatal(Result error, const sf::ClientProcessId &client_pid); Result ThrowFatalWithPolicy(Result error, const sf::ClientProcessId &client_pid, FatalPolicy policy); Result ThrowFatalWithCpuContext(Result error, const sf::ClientProcessId &client_pid, FatalPolicy policy, const CpuContext &cpu_ctx); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(ThrowFatal), - MAKE_SERVICE_COMMAND_META(ThrowFatalWithPolicy), - MAKE_SERVICE_COMMAND_META(ThrowFatalWithCpuContext), - }; }; + static_assert(fatal::impl::IsIService); - class PrivateService final : public sf::IServiceObject { - private: - enum class CommandId { - GetFatalEvent = 0, - }; - private: - /* Actual commands. 
*/ - Result GetFatalEvent(sf::OutCopyHandle out_h); + class PrivateService final { public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetFatalEvent), - }; + Result GetFatalEvent(sf::OutCopyHandle out_h); }; + static_assert(fatal::impl::IsIPrivateService<PrivateService>); }
diff --git a/stratosphere/lm/source/detail/lm_log_packet.cpp b/stratosphere/lm/source/detail/lm_log_packet.cpp new file mode 100644 index 000000000..0666b7f05 --- /dev/null +++ b/stratosphere/lm/source/detail/lm_log_packet.cpp @@ -0,0 +1,97 @@
+#include "lm_log_packet.hpp"
+
+namespace ams::lm::detail {
+
+    LogPacketTransmitterBase::LogPacketTransmitterBase(u8 *log_buffer, size_t log_buffer_size, LogPacketFlushFunction flush_fn, LogSeverity severity, bool verbosity, u64 process_id, bool head, bool tail) {
+        AMS_ABORT_UNLESS(log_buffer != nullptr);
+        // TODO: check log_buffer align == alignof(LogPacketHeader)
+        AMS_ABORT_UNLESS(log_buffer_size > sizeof(LogPacketHeader));
+        AMS_ABORT_UNLESS(flush_fn != nullptr);
+
+        this->header = reinterpret_cast<LogPacketHeader *>(log_buffer);
+        this->log_buffer_start = log_buffer;
+        this->log_buffer_end = log_buffer + log_buffer_size;
+        this->log_buffer_payload_start = log_buffer + sizeof(LogPacketHeader);
+        this->log_buffer_payload_current = log_buffer + sizeof(LogPacketHeader);
+        this->is_tail = tail;
+        this->flush_function = flush_fn;
+        this->header->SetProcessId(process_id);
+
+        auto current_thread = os::GetCurrentThread();
+        auto thread_id = os::GetThreadId(current_thread);
+        this->header->SetThreadId(thread_id);
+
+        this->header->SetHead(head);
+        this->header->SetLittleEndian(true);
+        this->header->SetSeverity(severity);
+        this->header->SetVerbosity(verbosity);
+    }
+
+    bool LogPacketTransmitterBase::Flush(bool tail) {
+        if (this->log_buffer_payload_current == this->log_buffer_payload_start) {
+            /* Nothing to log. */
+            return true;
+        }
+
+        this->header->SetTail(tail);
+        this->header->SetPayloadSize(static_cast<u32>(this->log_buffer_payload_current - this->log_buffer_payload_start));
+
+        auto ret = (this->flush_function)(this->log_buffer_start, static_cast<size_t>(this->log_buffer_payload_current - this->log_buffer_start));
+
+        this->header->SetHead(false);
+        this->log_buffer_payload_current = this->log_buffer_payload_start;
+        return ret;
+    }
+
+    void LogPacketTransmitterBase::PushDataChunkImpl(LogDataChunkKey key, const void *data, size_t data_size, bool is_string) {
+        auto start_position = this->log_buffer_payload_current;
+        auto data_start = reinterpret_cast<const u8 *>(data);
+        auto data_end = data_start + data_size;
+        this->PushUleb128(key);
+        auto unk = this->log_buffer_payload_current + 2; // ?
+        do {
+            auto left_data = this->log_buffer_end - this->log_buffer_payload_current;
+            if (left_data >= unk) {
+                // A
+            }
+
+            if (this->log_buffer_payload_current != this->log_buffer_payload_start) {
+                this->Flush(false);
+            }
+
+            if (left_data >= unk) {
+                // A
+            }
+            // ...
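+            /* The surrounding loop appears to stream one data chunk into the packet buffer:  */
+            /* the chunk key is pushed first as a ULEB128 value, presumably followed by the    */
+            /* chunk length (cf. LogDataChunkHeader) and then as many payload bytes as fit,    */
+            /* flushing the packet and starting a continuation packet whenever the buffer      */
+            /* runs out of space. ULEB128 stores 7 bits per byte, least-significant group      */
+            /* first, with the top bit set on every byte except the last; e.g. 300 (0x12C)     */
+            /* encodes as the two bytes AC 02. The control flow below still references         */
+            /* undeclared locals (x, z, d, a) from the incomplete decompilation.               */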
+ if (data_end - data_start) >= x) { + z = x; + } + else { + z = static_cast(data_end - data_start); + } + if (is_string) { + auto some_size = z; + auto utf8_size = 0; // nn::diag::detail::GetValidSizeAsUtf8String(...); + if (utf8_size >= 0) { + some_size = utf8_size; + } + d = some_size; + } + else { + d = z; + } + } while(a < data_end); + } + + void LogPacketTransmitterBase::PushData(const void *data, size_t data_size) { + AMS_ABORT_UNLESS(data != nullptr); + AMS_ABORT_UNLESS(data_size > this->GetRemainSize()); + if (data_size == 0) { + return; + } + + std::memcpy(this->log_buffer_payload_current, data, data_size); + this->log_buffer_payload_current += data_size; + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/detail/lm_log_packet.hpp b/stratosphere/lm/source/detail/lm_log_packet.hpp new file mode 100644 index 000000000..1ff46b10a --- /dev/null +++ b/stratosphere/lm/source/detail/lm_log_packet.hpp @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::lm::detail { + + enum LogPacketHeaderFlag { + LogPacketHeaderFlag_Head = (1 << 0), + LogPacketHeaderFlag_Tail = (1 << 1), + LogPacketHeaderFlag_LittleEndian = (1 << 2), + }; + + enum LogSeverity : u8 { + LogSeverity_Trace = 0, + LogSeverity_Info = 1, + LogSeverity_Warn = 2, + LogSeverity_Error = 3, + LogSeverity_Fatal = 4 + }; + + struct LogBinaryHeader { + static constexpr u32 Magic = util::FourCC<'h','p','h','p'>::Code; + + u32 magic; + u8 version; + u8 pad[3]; + }; + + struct LogPacketHeader { + u64 process_id; + u64 thread_id; + u8 flags; + u8 pad; + LogSeverity severity; + bool verbosity; + u32 payload_size; + + void SetProcessId(u64 process_id) { + this->process_id = process_id; + } + + void SetThreadId(u64 thread_id) { + this->thread_id = thread_id; + } + + void SetLittleEndian(bool le) { + if (le) { + this->flags |= LogPacketHeaderFlag_LittleEndian; + } + else { + this->flags &= ~LogPacketHeaderFlag_LittleEndian; + } + } + + void SetSeverity(LogSeverity severity) { + this->severity = severity; + } + + void SetVerbosity(bool verbosity) { + this->verbosity = verbosity; + } + + void SetHead(bool head) { + if (head) { + this->flags |= LogPacketHeaderFlag_Head; + } + else { + this->flags &= ~LogPacketHeaderFlag_Head; + } + } + + void SetTail(bool tail) { + if (tail) { + this->flags |= LogPacketHeaderFlag_Tail; + } + else { + this->flags &= ~LogPacketHeaderFlag_Tail; + } + } + + void SetPayloadSize(u32 size) { + this->payload_size = size; + } + + u32 GetPayloadSize() { + return this->payload_size; + } + + }; + static_assert(sizeof(LogPacketHeader) == 0x18); + + enum LogDataChunkKey : u8 { + LogDataChunkKey_LogSessionBegin = 0, ///< Log session begin (unknown) + LogDataChunkKey_LogSessionEnd = 1, ///< Log session end (unknown) + LogDataChunkKey_TextLog = 2, ///< Text to be logged. + LogDataChunkKey_LineNumber = 3, ///< Source line number. + LogDataChunkKey_FileName = 4, ///< Source file name. 
+ LogDataChunkKey_FunctionName = 5, ///< Source function name. + LogDataChunkKey_ModuleName = 6, ///< Process module name. + LogDataChunkKey_ThreadName = 7, ///< Process thread name. + LogDataChunkKey_LogPacketDropCount = 8, ///< Log packet drop count (unknown) + LogDataChunkKey_UserSystemClock = 9, ///< User system clock (unknown) + LogDataChunkKey_ProcessName = 10, ///< Process name. + }; + + struct LogDataChunkHeader { + LogDataChunkKey chunk_key; + u8 chunk_len; + }; + + using LogPacketFlushFunction = bool(*)(void*, size_t); + + class LogPacketTransmitterBase { + private: + LogPacketHeader *header; + u8 *log_buffer_start; + u8 *log_buffer_end; + u8 *log_buffer_payload_start; + u8 *log_buffer_payload_current; + bool is_tail; + LogPacketFlushFunction flush_function; + public: + LogPacketTransmitterBase(u8 *log_buffer, size_t log_buffer_size, LogPacketFlushFunction flush_fn, LogSeverity severity, bool verbosity, u64 process_id, bool head, bool tail); + + ~LogPacketTransmitterBase() { + this->Flush(this->is_tail); + } + + void PushData(const void *data, size_t data_size); + bool Flush(bool tail); + void PushDataChunkImpl(LogDataChunkKey key, const void *data, size_t data_size, bool is_string); + + void PushUleb128(u64 value) { + AMS_ASSERT(this->log_buffer_payload_current != this->log_buffer_end); + do { + u8 byte = value & 0x7F; + value >>= 7; + + if(value != 0) { + byte |= 0x80; + } + + *this->log_buffer_payload_current = byte; + this->log_buffer_payload_current += sizeof(byte); + } while(value != 0); + } + + void PushDataChunk(LogDataChunkKey key, const void *data, size_t data_size) { + return this->PushDataChunkImpl(key, data, data_size, false); + } + + void PushDataChunk(LogDataChunkKey key, const char *str, size_t str_len) { + return this->PushDataChunkImpl(key, str, str_len, true); + } + + size_t GetRemainSize() { + return static_cast(this->log_buffer_end - this->log_buffer_payload_current); + } + + size_t GetRequiredSizeToPushUleb128() { + // TODO + return 0; + } + }; + + class LogPacketTransmitter : public LogPacketTransmitterBase { + public: + using LogPacketTransmitterBase::LogPacketTransmitterBase; + + void PushLogSessionBegin() { + u8 dummy_value = 1; + this->PushDataChunk(LogDataChunkKey_LogSessionBegin, &dummy_value, sizeof(dummy_value)); + } + + void PushLogSessionEnd() { + u8 dummy_value = 1; + this->PushDataChunk(LogDataChunkKey_LogSessionEnd, &dummy_value, sizeof(dummy_value)); + } + + void PushTextLog(const char *log, size_t log_len) { + this->PushDataChunk(LogDataChunkKey_TextLog, log, log_len); + } + + void PushLineNumber(u32 line_no) { + this->PushDataChunk(LogDataChunkKey_LineNumber, &line_no, sizeof(line_no)); + } + + void PushFileName(const char *name, size_t name_len) { + this->PushDataChunk(LogDataChunkKey_FileName, name, name_len); + } + + void PushFunctionName(const char *name, size_t name_len) { + this->PushDataChunk(LogDataChunkKey_FunctionName, name, name_len); + } + + void PushModuleName(const char *name, size_t name_len) { + this->PushDataChunk(LogDataChunkKey_ModuleName, name, name_len); + } + + void PushThreadName(const char *name, size_t name_len) { + this->PushDataChunk(LogDataChunkKey_ThreadName, name, name_len); + } + + void PushLogPacketDropCount(u64 count) { + this->PushDataChunk(LogDataChunkKey_LogPacketDropCount, &count, sizeof(count)); + } + + void PushUserSystemClock(u64 usc) { + this->PushDataChunk(LogDataChunkKey_UserSystemClock, &usc, sizeof(usc)); + } + + void PushProcessName(const char *name, size_t name_len) { + 
this->PushDataChunk(LogDataChunkKey_ProcessName, name, name_len); + } + }; + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_a_logger.cpp b/stratosphere/lm/source/impl/lm_a_logger.cpp new file mode 100644 index 000000000..b192a2157 --- /dev/null +++ b/stratosphere/lm/source/impl/lm_a_logger.cpp @@ -0,0 +1,164 @@ +#include "lm_a_logger.hpp" +#include "../detail/lm_log_packet.hpp" + +namespace ams::lm::impl { + + namespace { + + bool PrepareLogFile(char *out_path, size_t len, const char *log_dir) { + char log_file[0x80] = {}; + auto time_point = time::StandardUserSystemClock::now(); + const auto posix_time = time::StandardUserSystemClock::to_time_t(time_point); + if (posix_time != 0) { + auto local_time = std::localtime(&posix_time); + + settings::system::SerialNumber serial_number; + settings::system::GetSerialNumber(&serial_number); + /* Log file format: sdmc://_[_].nxbinlog */ + /* The extra index is used when multiple logs are logged at the same time */ + const auto log_file_len = static_cast(snprintf(log_file, sizeof(log_file), "%s:/%s/%s_%04d%02d%02d%02d%02d%02d", "sdmc", log_dir, serial_number.str, local_time->tm_year + 1900, local_time->tm_mon + 1, local_time->tm_mday, local_time->tm_hour, local_time->tm_min, local_time->tm_sec)); + if (log_file_len <= sizeof(log_dir)) { + auto tmp_index = 1; + while(true) { + /* Only add the index if we couldn't create the default file name. */ + if (tmp_index == 1) { + const auto log_file_ext_len = static_cast(snprintf(out_path, len, "%s.%s", log_file, "nxbinlog")); + if (log_file_ext_len >= len) { + return false; + } + } + else { + const auto log_file_ext_len = static_cast(snprintf(out_path, len, "%s_%d.%s", log_file, tmp_index, "nxbinlog")); + if (log_file_ext_len >= len) { + return false; + } + } + + auto rc = fs::CreateFile(out_path, 0); + if (R_SUCCEEDED(rc)) { + break; + } + else if (fs::ResultPathAlreadyExists::Includes(rc) && (++tmp_index < 100)) { + /* If the log file already exists (multiple logs at the same time), retry over 100 times and increase the index */ + continue; + } + + return false; + } + return true; + } + } + return false; + } + + inline bool IsSdCardLoggingEnabled() { + bool logging_enabled = false; + auto enable_logging_size = settings::fwdbg::GetSettingsItemValue(&logging_enabled, sizeof(logging_enabled), "lm", "enable_sd_card_logging"); + return (enable_logging_size == sizeof(logging_enabled)) && logging_enabled; + } + + inline bool EnsureLogDirectory(const char *log_dir) { + /* Try to create the log directory. */ + /* If the directory already exists, there's no issue. */ + const auto result = fs::CreateDirectory(log_dir); + return R_SUCCEEDED(result) || fs::ResultPathAlreadyExists::Includes(result); + } + + } + + void ALogger::Dispose() { + if (this->some_flag) { + this->some_flag = false; + this->CallSomeFunction(false); + } + if (this->sd_card_mounted) { + fs::Unmount("sdmc"); + this->sd_card_mounted = false; + } + } + + bool ALogger::EnsureLogFile() { + if (this->some_flag) { + return true; + } + + bool unk_1 = false; + bool unk_2 = false; /* TODO: what are these? 
*/ + if (unk_1) { + this->some_flag_3 = false; + } + + if (unk_2) { + if (!this->some_flag_3 && R_SUCCEEDED(fs::MountSdCard("sdmc"))) { + this->sd_card_mounted = true; + auto log_dir_size = settings::fwdbg::GetSettingsItemValueSize("lm", "sd_card_log_output_directory"); + char log_dir[0x80] = {}; + if (log_dir_size <= sizeof(log_dir)) { + if (settings::fwdbg::GetSettingsItemValue(log_dir, sizeof(log_dir), "lm", "sd_card_log_output_directory") == log_dir_size) { + char log_dir_path[0x80] = {}; + const auto log_dir_path_len = static_cast(snprintf(log_dir_path, sizeof(log_dir_path), "%s:/%s", "sdmc", log_dir)); + if (log_dir_path_len < sizeof(log_dir_path)) { + if (EnsureLogDirectory(log_dir_path)) { + if (PrepareLogFile(this->log_file_path, sizeof(this->log_file_path), log_dir_path)) { + fs::FileHandle log_file_h; + if (R_SUCCEEDED(fs::OpenFile(&log_file_h, this->log_file_path, fs::OpenMode_Write | fs::OpenMode_AllowAppend))) { + ON_SCOPE_EXIT { fs::CloseFile(log_file_h); }; + + /* 8-byte header, current version is 1 */ + detail::LogBinaryHeader bin_header = { detail::LogBinaryHeader::Magic, 1 }; + if (R_SUCCEEDED(fs::WriteFile(log_file_h, 0, &bin_header, sizeof(bin_header), fs::WriteOption::Flush))) { + this->log_file_offset = sizeof(bin_header); + return true; + } + } + } + } + } + } + } + } + } + + return false; + } + + bool ALogger::SaveLog(void *buf, size_t size) { + static bool should_log = IsSdCardLoggingEnabled(); + auto ret_val = false; + if (!should_log) { + return ret_val; + } + + if (this->EnsureLogFile()) { + fs::FileHandle log_file_h; + if(R_SUCCEEDED(fs::OpenFile(&log_file_h, this->log_file_path, fs::OpenMode_Write | fs::OpenMode_AllowAppend))) { + ON_SCOPE_EXIT { fs::CloseFile(log_file_h); }; + /* Write log data. */ + if (R_SUCCEEDED(fs::WriteFile(log_file_h, this->log_file_offset, buf, size, fs::WriteOption::Flush))) { + this->log_file_offset += size; + ret_val = true; + if (this->some_flag) { + return ret_val; + } + } + } + } + if (this->sd_card_mounted) { + fs::Unmount("sdmc"); + this->sd_card_mounted = false; + this->some_flag_3 = true; + // loc_71000388E0 (something related to SD card event notifier) + } + if (this->some_flag) { + this->some_flag = ret_val; + this->CallSomeFunction(ret_val); + } + return ret_val; + } + + ALogger *GetALogger() { + static ALogger a_logger; + return &a_logger; + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_a_logger.hpp b/stratosphere/lm/source/impl/lm_a_logger.hpp new file mode 100644 index 000000000..a36e22855 --- /dev/null +++ b/stratosphere/lm/source/impl/lm_a_logger.hpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::lm::impl { + + using ALoggerSomeFunction = void(*)(bool); + + class ALogger { + private: + os::SdkMutex sdk_mutex; + bool some_flag; + bool sd_card_mounted; + bool some_flag_3; + char log_file_path[128]; + bool some_flag_4; + size_t log_file_offset; + ALoggerSomeFunction some_fn; + size_t unk; + public: + ALogger() : sdk_mutex(), some_flag(false), sd_card_mounted(false), some_flag_3(false), log_file_path{}, some_flag_4(false), log_file_offset(0), some_fn(nullptr), unk(0) {} + + void Dispose(); + + bool EnsureLogFile(); + bool SaveLog(void *buf, size_t size); + + void SetSomeFunction(ALoggerSomeFunction fn) { + std::scoped_lock lk(this->sdk_mutex); + this->some_fn = fn; + } + + void CallSomeFunction(bool flag) { + std::scoped_lock lk(this->sdk_mutex); + if(this->some_fn != nullptr) { + (this->some_fn)(flag); + } + } + }; + + ALogger *GetALogger(); + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_b_logger.cpp b/stratosphere/lm/source/impl/lm_b_logger.cpp new file mode 100644 index 000000000..36e76d6ce --- /dev/null +++ b/stratosphere/lm/source/impl/lm_b_logger.cpp @@ -0,0 +1,66 @@ +#include "lm_b_logger.hpp" + +namespace ams::lm::impl { + + namespace { + + bool BLoggerFlushFunction(void *log_data, size_t size) { + /* GetCLogger()->Log(log_data, size); */ + return true; + } + + } + + bool BLogger::SendLogSessionBeginPacket(u64 process_id) { + /* ------------ Log buffer size: packet header (0x18) + chunk header (0x2) + LogSessionBegin dummy value (0x1) = 0x1B */ + constexpr auto log_buffer_size = sizeof(detail::LogPacketHeader) + sizeof(detail::LogDataChunkHeader) + sizeof(u8); + u8 log_buffer[log_buffer_size] = {}; + detail::LogPacketTransmitter log_packet_transmitter(log_buffer, log_buffer_size, this->flush_fn, detail::LogSeverity_Info, false, process_id, true, true); + + log_packet_transmitter.PushLogSessionBegin(); + const auto ok = log_packet_transmitter.Flush(true); + if (!ok) { + this->IncrementPacketDropCount(); + } + + return ok; + } + + bool BLogger::SendLogSessionEndPacket(u64 process_id) { + /* ------------ Log buffer size: packet header (0x18) + chunk header (0x2) + LogSessionEnd dummy value (0x1) = 0x1B */ + constexpr auto log_buffer_size = sizeof(detail::LogPacketHeader) + sizeof(detail::LogDataChunkHeader) + sizeof(u8); + u8 log_buffer[log_buffer_size] = {}; + detail::LogPacketTransmitter log_packet_transmitter(log_buffer, log_buffer_size, this->flush_fn, detail::LogSeverity_Info, false, process_id, true, true); + + log_packet_transmitter.PushLogSessionEnd(); + const auto ok = log_packet_transmitter.Flush(true); + if (!ok) { + this->IncrementPacketDropCount(); + } + + return ok; + } + + bool BLogger::SendLogPacketDropCountPacket() { + std::scoped_lock lk(this->packet_drop_count_lock); + + /* ------------ Log buffer size: packet header (0x18) + chunk header (0x2) + log packet drop count (0x8) = 0x22 */ + constexpr auto log_buffer_size = sizeof(detail::LogPacketHeader) + sizeof(detail::LogDataChunkHeader) + sizeof(this->packet_drop_count); + u8 log_buffer[log_buffer_size] = {}; + detail::LogPacketTransmitter log_packet_transmitter(log_buffer, log_buffer_size, this->flush_fn, detail::LogSeverity_Info, false, 0, true, true); + + log_packet_transmitter.PushLogPacketDropCount(this->packet_drop_count); + const auto ok = log_packet_transmitter.Flush(true); + if (!ok) { + this->IncrementPacketDropCount(); + } + + return ok; + } + + BLogger *GetBLogger() { + static BLogger b_logger(BLoggerFlushFunction); + return 
&b_logger; + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_b_logger.hpp b/stratosphere/lm/source/impl/lm_b_logger.hpp new file mode 100644 index 000000000..99debddbd --- /dev/null +++ b/stratosphere/lm/source/impl/lm_b_logger.hpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "../detail/lm_log_packet.hpp" + +namespace ams::lm::impl { + + using BLoggerSomeFunction = bool(*)(void*, size_t); + + class BLogger { + private: + detail::LogPacketFlushFunction flush_fn; + u64 packet_drop_count; + os::SdkMutex packet_drop_count_lock; + public: + BLogger(detail::LogPacketFlushFunction flush_fn) : flush_fn(flush_fn), packet_drop_count(0), packet_drop_count_lock() {} + + void IncrementPacketDropCount() { + std::scoped_lock lk(this->packet_drop_count_lock); + this->packet_drop_count++; + } + + bool SendLogSessionBeginPacket(u64 process_id); + bool SendLogSessionEndPacket(u64 process_id); + bool SendLogPacketDropCountPacket(); + }; + + BLogger *GetBLogger(); + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_c_logger.cpp b/stratosphere/lm/source/impl/lm_c_logger.cpp new file mode 100644 index 000000000..f7fecd5ef --- /dev/null +++ b/stratosphere/lm/source/impl/lm_c_logger.cpp @@ -0,0 +1,81 @@ +#include "lm_c_logger.hpp" +#include "lm_a_logger.hpp" + +namespace ams::lm::impl { + + u8 g_logger_data_buffer[0x20000]; + + namespace { + + bool g_some_flag = false; + + bool SendDataOverHtcs() { + return false; + } + + inline bool LogToSdCardAndHtcs(void *data, size_t size) { + return SendDataOverHtcs() && GetALogger()->SaveLog(data, size); + } + + bool LogFunction(void *data, size_t size) { + if (!g_some_flag) { + time::PosixTime posix_time; + if (R_SUCCEEDED(time::StandardUserSystemClock::GetCurrentTime(&posix_time))) { + /* TODO: what time conversions are done here? */ + /* TODO: what's done here? 
*/ + } + g_some_flag = true; + } + return LogToSdCardAndHtcs(data, size); + } + + } + + bool CLogger::Log(const void *data, size_t data_size) { + if (data_size == 0) { + return true; + } + + this->sdk_mutex_1.Lock(); + if (this->CopyLogData(data, data_size)) { + return true; + } + auto tmp_flag = this->unk_flag; + + while(true) { + this->unk_flag++; + this->condvar_1.Wait(this->sdk_mutex_1); + auto flag = this->some_flag; + this->unk_flag--; + if (flag) { + break; + } + if (this->CopyLogData(data, data_size)) { + return true; + } + } + if (tmp_flag == 0) { + this->some_flag = false; + } + this->sdk_mutex_1.Unlock(); + return false; + } + + bool CLogger::SomeMemberFn() { + std::scoped_lock lk(this->sdk_mutex_2); + if (this->cur_offset == 0) { + this->sdk_mutex_1.Lock(); + do { + do { + this->condvar_2.Wait(this->sdk_mutex_1); + } while(this->cur_offset == 0); + } while(this->some_refcount != 0); + } + } + + CLogger *GetCLogger() { + static CLogger c_logger(g_logger_data_buffer, sizeof(g_logger_data_buffer), LogFunction); + return &c_logger; + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_c_logger.hpp b/stratosphere/lm/source/impl/lm_c_logger.hpp new file mode 100644 index 000000000..318d71eb8 --- /dev/null +++ b/stratosphere/lm/source/impl/lm_c_logger.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::lm::impl { + + using CLoggerLogFunction = bool(*)(void*, size_t); + + class CLogger { + private: + void *data_buffer; + size_t cur_offset; + uint64_t some_refcount; + void *unk_ref_2; + uint64_t unk_3; + uint64_t unk_4; + CLogger *self_ref; + void *unk_ref_3; + size_t data_buffer_size; + CLoggerLogFunction log_fn; + os::SdkMutex sdk_mutex_1; + os::SdkMutex sdk_mutex_2; + os::SdkConditionVariable condvar_1; + os::SdkConditionVariable condvar_2; + bool some_flag; + uint8_t pad[7]; + uint64_t unk_flag; + public: + CLogger(void *data_buffer, size_t data_buffer_size, CLoggerLogFunction log_fn) : data_buffer(data_buffer), unk_ref_2(/*?*/ nullptr), unk_ref_3(/*?*/ nullptr), data_buffer_size(data_buffer_size), log_fn(log_fn) {} + + bool Log(const void *data, size_t data_size); + bool SomeMemberFn(); + + inline bool CopyLogData(const void *data, size_t data_size) { + if ((this->data_buffer_size - this->cur_offset) >= data_size) { + auto out_buf = reinterpret_cast(this->data_buffer) + this->cur_offset; + this->cur_offset += data_size; + ++this->some_refcount; + this->sdk_mutex_1.Unlock(); + std::memcpy(out_buf, data, data_size); + this->sdk_mutex_1.Lock(); + if((--this->some_refcount) == 0) { + this->condvar_2.Signal(); + } + this->sdk_mutex_1.Unlock(); + return true; + } + return false; + } + }; + + CLogger *GetCLogger(); + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_d_logger.cpp b/stratosphere/lm/source/impl/lm_d_logger.cpp new file mode 100644 index 000000000..0e5948c12 --- /dev/null +++ b/stratosphere/lm/source/impl/lm_d_logger.cpp @@ -0,0 +1,88 @@ +#include "lm_d_logger.hpp" + +namespace ams::lm::impl { + + u8 g_log_getter_log_buffer[0x8000]; + void *g_log_getter_log_data = nullptr; + size_t g_log_getter_log_size = 0; + u64 g_log_getter_packet_drop_count = 0; + + namespace { + + bool OnLog(void *data, size_t size) { + /* */ + g_log_getter_log_data = data; + g_log_getter_log_size = size; + return true; + } + + } + + bool DLogger::HasLogData() { + if (this->cur_offset == 0) { + /* If we have log data to be collected, offset cannot be zero. */ + return false; + } + + /* Update log pointer and size with our values. */ + const auto ok = (this->some_fn)(this->log_data_buffer, this->cur_offset); + if (!ok) { + /* Failed to update them. */ + return false; + } + + /* Reset the offset, ready to receive new data later. */ + this->cur_offset = 0; + return true; + } + + bool DLogger::Log(void *log_data, size_t size) { + if (size > 0) { + if ((this->size - this->cur_offset) < size) { + return false; + } + + std::memcpy(reinterpret_cast(this->log_data_buffer) + this->cur_offset, log_data, size); + this->cur_offset += size; + } + return true; + } + + DLogger *GetDLogger() { + static DLogger d_logger(g_log_getter_log_buffer, sizeof(g_log_getter_log_buffer), OnLog); + return &d_logger; + } + + bool DLoggerLogGetterWriteFunction(void *log_data, size_t size, u64 unk_param) { + const auto ok = GetDLogger()->Log(log_data, size); + if (!ok) { + /* Logging failed, thus increase packet drop count. */ + g_log_getter_packet_drop_count++; + } + + return true; + } + + void WriteLogToLogGetter(const void *log_data, size_t size, LogGetterWriteFunction write_fn, u64 unk_param) { + // TODO + } + + size_t ReadLogFromLogGetter(void *out_log_data, size_t size, u64 *out_packet_drop_count) { + /* Copy log data from the global pointer and size values. 
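g_log_getter_log_data and g_log_getter_log_size are filled in by OnLog() when DLogger flushes its buffer via HasLogData(); if the caller's buffer is smaller than the stored data, the copy is truncated and the excess is counted as a dropped packet.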
*/ + auto read_size = g_log_getter_log_size; + if (g_log_getter_log_size > size) { + /* Buffer can't hold all the packet, thus increase the drop count. */ + read_size = size; + g_log_getter_packet_drop_count++; + } + + /* Copy data. */ + std::memcpy(out_log_data, g_log_getter_log_data, read_size); + + /* Get the packet drop count value, and reset it. */ + *out_packet_drop_count = g_log_getter_packet_drop_count; + g_log_getter_packet_drop_count = 0; + return read_size; + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_d_logger.hpp b/stratosphere/lm/source/impl/lm_d_logger.hpp new file mode 100644 index 000000000..5e44b07f3 --- /dev/null +++ b/stratosphere/lm/source/impl/lm_d_logger.hpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "../detail/lm_log_packet.hpp" + +namespace ams::lm::impl { + + using DLoggerSomeFunction = bool(*)(void*, size_t); + using LogGetterWriteFunction = bool(*)(void*, size_t, u64); + + class DLogger { + private: + void *log_data_buffer; + size_t size; + size_t cur_offset; + DLoggerSomeFunction some_fn; + bool unk_flag; + public: + DLogger(void *log_data_buf, size_t size, DLoggerSomeFunction some_fn) : log_data_buffer(log_data_buf), size(size), cur_offset(0), some_fn(some_fn), unk_flag(false) {} + + bool HasLogData(); + bool Log(void *log_data, size_t size); + }; + + DLogger *GetDLogger(); + + bool DLoggerLogGetterWriteFunction(void *log_data, size_t size, u64 unk_param); + + void WriteLogToLogGetter(const void *log_data, size_t size, LogGetterWriteFunction write_fn, u64 unk_param); + size_t ReadLogFromLogGetter(void *out_log_data, size_t size, u64 *out_packet_drop_count); + + inline void WriteLogToLogGetterDefault(const void *log_data, size_t size) { + WriteLogToLogGetter(log_data, size, DLoggerLogGetterWriteFunction, 0); + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_log_server_proxy.cpp b/stratosphere/lm/source/impl/lm_log_server_proxy.cpp new file mode 100644 index 000000000..36942fed6 --- /dev/null +++ b/stratosphere/lm/source/impl/lm_log_server_proxy.cpp @@ -0,0 +1,10 @@ +#include "lm_log_server_proxy.hpp" + +namespace ams::lm::impl { + + LogServerProxy *GetLogServerProxy() { + static LogServerProxy log_server_proxy; + return &log_server_proxy; + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_log_server_proxy.hpp b/stratosphere/lm/source/impl/lm_log_server_proxy.hpp new file mode 100644 index 000000000..6b35f5eae --- /dev/null +++ b/stratosphere/lm/source/impl/lm_log_server_proxy.hpp @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "lm_a_logger.hpp" + +namespace ams::lm::impl { + + class LogServerProxy { + private: + alignas(os::ThreadStackAlignment) u8 htcs_thread_stack[4_KB]; + os::ThreadType htcs_thread; + os::SdkConditionVariable some_cond_var; + os::Event finalize_event; + os::SdkMutex some_cond_var_lock; + os::SdkMutex some_fn_lock; + u32 unk_3; + u32 unk_4; + ALoggerSomeFunction some_fn; + u64 data_4; + char data_5[3566]; + public: + LogServerProxy() : htcs_thread_stack{}, htcs_thread(), some_cond_var(), finalize_event(os::EventClearMode_ManualClear), some_cond_var_lock(), some_fn_lock(), unk_3(0), some_fn(nullptr), data_4(0), data_5{} {} + + void StartHtcsThread(ThreadFunc htcs_entry) { + R_ABORT_UNLESS(os::CreateThread(std::addressof(this->htcs_thread), htcs_entry, this, this->htcs_thread_stack, sizeof(this->htcs_thread_stack), AMS_GET_SYSTEM_THREAD_PRIORITY(lm, HtcsConnection))); + os::SetThreadNamePointer(std::addressof(this->htcs_thread), AMS_GET_SYSTEM_THREAD_NAME(lm, HtcsConnection)); + os::StartThread(std::addressof(this->htcs_thread)); + } + + void DisposeHtcsThread() { + this->finalize_event.Signal(); + /* nn::htcs::Close(); */ + os::WaitThread(std::addressof(this->htcs_thread)); + os::DestroyThread(std::addressof(this->htcs_thread)); + } + + void SetSomeFunction(ALoggerSomeFunction some_fn) { + std::scoped_lock lk(this->some_fn_lock); + this->some_fn = some_fn; + } + + os::Event &GetFinalizeEvent() { + return this->finalize_event; + } + }; + + LogServerProxy *GetLogServerProxy(); + +} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_logging.cpp b/stratosphere/lm/source/impl/lm_logging.cpp deleted file mode 100644 index 7eaf2d8b5..000000000 --- a/stratosphere/lm/source/impl/lm_logging.cpp +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ -#include -#include "lm_logging.hpp" - -namespace ams::lm::impl { - - namespace { - - LogInfo g_last_log_info; - bool g_can_access_fs = true; - os::Mutex g_logging_lock(true); - os::SystemEvent g_logging_event(os::EventClearMode_AutoClear, true); - - void UpdateLastLogInfo(LogInfo info) { - std::scoped_lock lk(g_logging_lock); - g_last_log_info = info; - } - - bool CanAccessFs() { - std::scoped_lock lk(g_logging_lock); - return g_can_access_fs; - } - - void SignalLogEvent() { - std::scoped_lock lk(g_logging_lock); - g_logging_event.Signal(); - } - - } - - void SetCanAccessFs(bool can_access) { - std::scoped_lock lk(g_logging_lock); - g_can_access_fs = can_access; - } - - Result WriteLogPackets(std::vector &packet_list) { - std::scoped_lock lk(g_logging_lock); - if(CanAccessFs() && !packet_list.empty()) { - /* Ensure log directory exists. */ - fs::CreateDirectory(DebugLogDirectory); - - const auto program_id = packet_list.front().GetProgramId(); - - /* Ensure process's directory for debug logs exists. */ - char process_dir[FS_MAX_PATH] = {}; - std::snprintf(process_dir, sizeof(process_dir), "%s/0x%016lX", DebugLogDirectory, static_cast(program_id)); - fs::CreateDirectory(process_dir); - - /* Use current system tick as the binary log's identifier / file name. */ - const LogInfo info = { - .log_id = os::GetSystemTick().GetInt64Value(), - .program_id = program_id, - }; - - /* Generate binary log path. */ - /* All current log packets will be written to the same file. */ - /* Binary log files might contain several packets, but they all must start with a head packet and end with a tail packet. */ - char bin_log_path[FS_MAX_PATH] = {}; - std::snprintf(bin_log_path, sizeof(bin_log_path), "%s/0x%016lX.bin", process_dir, info.log_id); - - /* Ensure the file exists. */ - fs::CreateFile(bin_log_path, 0); - - /* Open the file. */ - fs::FileHandle bin_log_file; - R_TRY(fs::OpenFile(&bin_log_file, bin_log_path, fs::OpenMode_Write | fs::OpenMode_AllowAppend)); - ON_SCOPE_EXIT { fs::CloseFile(bin_log_file); }; - - s64 offset = 0; - for(const auto &packet_buf: packet_list) { - /* Write each packet. Don't write the entire buffer since it might have garbage from previous packets at the end. */ - const size_t size = packet_buf.GetPacketSize(); - R_TRY(fs::WriteFile(bin_log_file, offset, packet_buf.buf.get(), size, fs::WriteOption::Flush)); - offset += size; - } - - /* Update the last log's info and signal log event. */ - UpdateLastLogInfo(info); - SignalLogEvent(); - } - return ResultSuccess(); - } - - LogInfo GetLastLogInfo() { - std::scoped_lock lk(g_logging_lock); - return g_last_log_info; - } - - Handle GetLogEventHandle() { - std::scoped_lock lk(g_logging_lock); - return g_logging_event.GetReadableHandle(); - } - -} \ No newline at end of file diff --git a/stratosphere/lm/source/impl/lm_logging.hpp b/stratosphere/lm/source/impl/lm_logging.hpp deleted file mode 100644 index 7c3d25786..000000000 --- a/stratosphere/lm/source/impl/lm_logging.hpp +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#pragma once -#include -#include "../lm_types.hpp" - -namespace ams::lm::impl { - - constexpr const char DebugLogDirectory[] = "sdmc:/atmosphere/debug_logs"; - - enum LogPacketFlags : u8 { - LogPacketFlags_Head = BIT(0), /* Head -> a packet list is being sent, with this packet being the initial one. */ - LogPacketFlags_Tail = BIT(1), /* Tail -> this is the final packet of the packet list. */ - LogPacketFlags_LittleEndian = BIT(2), - }; - - struct LogPacketHeader { - u64 process_id; - u64 thread_id; - u8 flags; - u8 pad; - u8 severity; - u8 verbosity; - u32 payload_size; - - inline constexpr bool IsHead() const { - return this->flags & LogPacketFlags_Head; - } - - inline constexpr bool IsTail() const { - return this->flags & LogPacketFlags_Tail; - } - - }; - static_assert(sizeof(LogPacketHeader) == 0x18, "LogPacketHeader definition"); - - struct LogInfo { - s64 log_id; /* This is the system tick value when the log was saved. */ - ncm::ProgramId program_id; - }; - - /* Store log packet buffers as a unique_ptr, so that they automatically get disposed after they're used. */ - - struct LogPacketBuffer { - ncm::ProgramId program_id; - std::unique_ptr buf; - size_t buf_size; - - LogPacketBuffer() : program_id(0), buf(nullptr), buf_size(0) {} - - LogPacketBuffer(ncm::ProgramId program_id, const void *buf, size_t size) : program_id(program_id), buf(std::make_unique(size)), buf_size(size) { - if(this->buf != nullptr) { - std::memcpy(this->buf.get(), buf, size); - } - } - - inline const LogPacketHeader *GetHeader() const { - if(this->buf == nullptr) { - return nullptr; - } - return reinterpret_cast(this->buf.get()); - } - - inline size_t GetPacketSize() const { - if(this->buf == nullptr) { - return 0; - } - auto header = this->GetHeader(); - return sizeof(LogPacketHeader) + header->payload_size; - } - - inline bool ValidatePacket() const { - if(this->buf == nullptr) { - return false; - } - /* Ensure that the buffer size is big enough to properly hold the packet. */ - /* Input buffers might be bigger than the actual packet size. */ - return this->buf_size >= this->GetPacketSize(); - } - - inline bool IsHead() const { - if(this->buf == nullptr) { - return false; - } - auto header = this->GetHeader(); - return header->IsHead(); - } - - inline bool IsTail() const { - if(this->buf == nullptr) { - return false; - } - auto header = this->GetHeader(); - return header->IsTail(); - } - - inline ncm::ProgramId GetProgramId() const { - return this->program_id; - } - - }; - - inline void ClearDebugLogs() { - fs::DeleteDirectoryRecursively(DebugLogDirectory); - } - - void SetCanAccessFs(bool can_access); - Result WriteLogPackets(std::vector &packet_list); - - LogInfo GetLastLogInfo(); - - Handle GetLogEventHandle(); - -} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_log_getter.cpp b/stratosphere/lm/source/lm_log_getter.cpp new file mode 100644 index 000000000..dbbbc648c --- /dev/null +++ b/stratosphere/lm/source/lm_log_getter.cpp @@ -0,0 +1,34 @@ +#include "lm_log_getter.hpp" +#include "impl/lm_d_logger.hpp" + +namespace ams::lm { + + /* Note: this global flag isn't accessed by lm:get, it'll be read by the Log() command from lm service. */ + bool g_log_getter_logging_enabled = false; + + void LogGetter::StartLogging() { + /* Set global flag. */ + g_log_getter_logging_enabled = true; + } + + void LogGetter::StopLogging() { + /* Set global flag. 
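Cleared here and set again in StartLogging() above; Logger::Log() in lm_log_service.cpp reads this flag to decide whether packets should also be mirrored into the lm:get buffer.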
*/ + g_log_getter_logging_enabled = false; + } + + void LogGetter::GetLog(const sf::OutAutoSelectBuffer &out_log_buffer, sf::Out out_size, sf::Out out_log_packet_drop_count) { + u64 log_packet_drop_count = 0; + size_t read_size = 0; + + /* Check if anything was logged, otherwise don't read anything. */ + if (impl::GetDLogger()->HasLogData()) { + /* Read log data. */ + read_size = impl::ReadLogFromLogGetter(out_log_buffer.GetPointer(), out_log_buffer.GetSize(), std::addressof(log_packet_drop_count)); + } + + /* This value will contain the amount of packets missed before reading this one */ + out_log_packet_drop_count.SetValue(log_packet_drop_count); + out_size.SetValue(read_size); + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_log_getter.hpp b/stratosphere/lm/source/lm_log_getter.hpp new file mode 100644 index 000000000..e03b8da9c --- /dev/null +++ b/stratosphere/lm/source/lm_log_getter.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "detail/lm_log_packet.hpp" + +namespace ams::lm { + + namespace impl { + + #define AMS_LM_I_LOG_GETTER_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, void, StartLogging, ()) \ + AMS_SF_METHOD_INFO(C, H, 1, void, StopLogging, ()) \ + AMS_SF_METHOD_INFO(C, H, 2, void, GetLog, (const sf::OutAutoSelectBuffer &out_log_buffer, sf::Out out_size, sf::Out out_log_packet_drop_count)) \ + + AMS_SF_DEFINE_INTERFACE(ILogGetter, AMS_LM_I_LOG_GETTER_INFO) + + } + + class LogGetter { + public: + void StartLogging(); + void StopLogging(); + void GetLog(const sf::OutAutoSelectBuffer &out_log_buffer, sf::Out out_size, sf::Out out_log_packet_drop_count); + }; + static_assert(impl::IsILogGetter); + +} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_log_service.cpp b/stratosphere/lm/source/lm_log_service.cpp new file mode 100644 index 000000000..6006dde6e --- /dev/null +++ b/stratosphere/lm/source/lm_log_service.cpp @@ -0,0 +1,94 @@ +#include "lm_log_service.hpp" +#include "lm_threads.hpp" +#include "impl/lm_b_logger.hpp" +#include "impl/lm_c_logger.hpp" +#include "impl/lm_d_logger.hpp" + +namespace ams::lm { + + /* Defined in DLogger's source code. */ + extern bool g_log_getter_logging_enabled; + + namespace { + + LogDestination g_log_destination = LogDestination_TMA; + + inline void SomeLog(const sf::InAutoSelectBuffer &log_buffer, const bool increment_drop_count) { + if (increment_drop_count) { + impl::GetBLogger()->IncrementPacketDropCount(); + } + + if (!(g_log_destination & 2)) { + /* TODO: what's done here? */ + } + + if (g_log_getter_logging_enabled) { + /* Log to lm:get. 
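That is, copy the packet into DLogger's buffer through WriteLogToLogGetterDefault() so that a later GetLog() call on the lm:get service can return it together with the current packet drop count.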
*/ + impl::WriteLogToLogGetterDefault(log_buffer.GetPointer(), log_buffer.GetSize()); + } + } + + u8 g_logger_heap_memory[0x4000]; + os::SdkMutex g_logger_heap_lock; + + lmem::HeapHandle GetLoggerHeapHandle() { + static lmem::HeapHandle g_logger_heap_handle = lmem::CreateExpHeap(g_logger_heap_memory, sizeof(g_logger_heap_memory), lmem::CreateOption_None); + return g_logger_heap_handle; + } + + } + + Logger::Logger(os::ProcessId process_id) : process_id(process_id) { + impl::GetBLogger()->SendLogSessionBeginPacket(static_cast(process_id)); + } + + Logger::~Logger() { + impl::GetBLogger()->SendLogSessionEndPacket(static_cast(this->process_id)); + } + + void *Logger::operator new(size_t size) { + std::scoped_lock lk(g_logger_heap_lock); + return lmem::AllocateFromExpHeap(GetLoggerHeapHandle(), size); + } + + void Logger::operator delete(void *ptr) { + std::scoped_lock lk(g_logger_heap_lock); + lmem::FreeToExpHeap(GetLoggerHeapHandle(), ptr); + } + + void Logger::Log(const sf::InAutoSelectBuffer &log_buffer) { + auto log_packet_header = (detail::LogPacketHeader*)log_buffer.GetPointer(); + + /* Don't log anything if payload size isn't correct. */ + if ((log_packet_header->GetPayloadSize() + sizeof(detail::LogPacketHeader)) != log_buffer.GetSize()) { + return; + } + + /* Set process ID. */ + log_packet_header->SetProcessId(static_cast(this->process_id)); + + bool do_increment_drop_count = false; + if (g_log_destination & 1) { + if (ShouldLogWithFlush()) { + do_increment_drop_count = !impl::GetCLogger()->Log(log_buffer.GetPointer(), log_buffer.GetSize()); + } + else { + /* TODO: ? */ + do_increment_drop_count = !impl::GetCLogger()->Log(log_buffer.GetPointer(), log_buffer.GetSize()); + } + } + + SomeLog(log_buffer, do_increment_drop_count); + } + + void Logger::SetDestination(LogDestination log_destination) { + /* Set global destination value. */ + g_log_destination = log_destination; + } + + void LogService::OpenLogger(const sf::ClientProcessId &client_pid, sf::Out> out_logger) { + /* Simply create a logger object, which will be allocated/freed with the exp heap created above. */ + out_logger.SetValue(sf::MakeShared(client_pid.process_id)); + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_log_service.hpp b/stratosphere/lm/source/lm_log_service.hpp new file mode 100644 index 000000000..2be6770a7 --- /dev/null +++ b/stratosphere/lm/source/lm_log_service.hpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "detail/lm_log_packet.hpp" + +namespace ams::lm { + + enum LogDestination : u32 { + LogDestination_TMA = (1 << 0), + LogDestination_UART = (1 << 1), + LogDestination_UARTSleeping = (1 << 2) + }; + + namespace impl { + + #define AMS_LM_I_LOGGER_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, void, Log, (const sf::InAutoSelectBuffer &log_buffer)) \ + AMS_SF_METHOD_INFO(C, H, 1, void, SetDestination, (LogDestination log_destination)) + + AMS_SF_DEFINE_INTERFACE(ILogger, AMS_LM_I_LOGGER_INFO) + + #define AMS_LM_I_LOG_SERVICE_INFO(C, H) \ + AMS_SF_METHOD_INFO(C, H, 0, void, OpenLogger, (const sf::ClientProcessId &client_pid, sf::Out> out_logger)) + + AMS_SF_DEFINE_INTERFACE(ILogService, AMS_LM_I_LOG_SERVICE_INFO) + + } + + class Logger { + private: + os::ProcessId process_id; + public: + Logger(os::ProcessId process_id); + ~Logger(); + + void *operator new(size_t size); + void operator delete(void *p); + + void Log(const sf::InAutoSelectBuffer &log_buffer); + void SetDestination(LogDestination log_destination); + }; + static_assert(impl::IsILogger); + + class LogService { + public: + void OpenLogger(const sf::ClientProcessId &client_pid, sf::Out> out_logger); + }; + static_assert(impl::IsILogService); + +} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_main.cpp b/stratosphere/lm/source/lm_main.cpp index 4e321532e..227d82565 100644 --- a/stratosphere/lm/source/lm_main.cpp +++ b/stratosphere/lm/source/lm_main.cpp @@ -1,166 +1,168 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#include -#include "lm_service.hpp" - -extern "C" { - extern u32 __start__; - - u32 __nx_applet_type = AppletType_None; - u32 __nx_fs_num_sessions = 1; - - #define INNER_HEAP_SIZE 0x20000 - size_t nx_inner_heap_size = INNER_HEAP_SIZE; - char nx_inner_heap[INNER_HEAP_SIZE]; - - void __libnx_initheap(void); - void __appInit(void); - void __appExit(void); -} - -namespace ams { - - ncm::ProgramId CurrentProgramId = ncm::SystemProgramId::LogManager; - - namespace result { - - /* Fatal is launched after we are launched, so disable this. */ - bool CallFatalOnResultAssertion = false; - - } - -} - -using namespace ams; - -void __libnx_initheap(void) { - void* addr = nx_inner_heap; - size_t size = nx_inner_heap_size; - - /* Newlib */ - extern char* fake_heap_start; - extern char* fake_heap_end; - - fake_heap_start = (char*)addr; - fake_heap_end = (char*)addr + size; -} - -void __appInit(void) { - hos::InitializeForStratosphere(); - - /* Initialize services. */ - sm::DoWithSession([&]() { - R_ABORT_UNLESS(pminfoInitialize()); - R_ABORT_UNLESS(fsInitialize()); - R_ABORT_UNLESS(pscmInitialize()); - R_ABORT_UNLESS(setsysInitialize()); - }); - - R_ABORT_UNLESS(fs::MountSdCard("sdmc")); - - ams::CheckApiVersion(); -} - -void __appExit(void) { - fs::Unmount("sdmc"); - - /* Cleanup services. 
*/ - setsysExit(); - pscmExit(); - fsExit(); - pminfoExit(); -} - -namespace { - - /* TODO: these domain/domain object amounts work fine, but which ones does N actually use? */ - struct ServerOptions { - static constexpr size_t PointerBufferSize = 0; - static constexpr size_t MaxDomains = 0x40; - static constexpr size_t MaxDomainObjects = 0x200; - }; - - constexpr sm::ServiceName LmServiceName = sm::ServiceName::Encode("lm"); - constexpr size_t LmMaxSessions = 42; - - /* lm */ - constexpr size_t NumServers = 1; - sf::hipc::ServerManager g_server_manager; - - psc::PmModule g_pm_module; - os::WaitableHolderType g_module_waitable_holder; - -} - -namespace ams::lm { - - void StartAndLoopProcess() { - /* Get our psc:m module to handle requests. */ - R_ABORT_UNLESS(g_pm_module.Initialize(psc::PmModuleId_Lm, nullptr, 0, os::EventClearMode_ManualClear)); - os::InitializeWaitableHolder(std::addressof(g_module_waitable_holder), g_pm_module.GetEventPointer()->GetBase()); - os::SetWaitableHolderUserData(std::addressof(g_module_waitable_holder), static_cast(psc::PmModuleId_Lm)); - g_server_manager.AddUserWaitableHolder(std::addressof(g_module_waitable_holder)); - - psc::PmState pm_state; - psc::PmFlagSet pm_flags; - while(true) { - auto *signaled_holder = g_server_manager.WaitSignaled(); - if(signaled_holder != std::addressof(g_module_waitable_holder)) { - R_ABORT_UNLESS(g_server_manager.Process(signaled_holder)); - } - else { - g_pm_module.GetEventPointer()->Clear(); - if(R_SUCCEEDED(g_pm_module.GetRequest(std::addressof(pm_state), std::addressof(pm_flags)))) { - switch(pm_state) { - case psc::PmState_Awake: - case psc::PmState_ReadyAwaken: - /* Awake, enable logging. */ - impl::SetCanAccessFs(true); - break; - case psc::PmState_ReadySleep: - case psc::PmState_ReadyShutdown: - /* Sleep, disable logging. */ - impl::SetCanAccessFs(false); - break; - default: - break; - } - R_ABORT_UNLESS(g_pm_module.Acknowledge(pm_state, ResultSuccess())); - } - g_server_manager.AddUserWaitableHolder(signaled_holder); - } - } - } - -} - -int main(int argc, char **argv) { - /* Set thread name. */ - os::SetThreadNamePointer(os::GetCurrentThread(), AMS_GET_SYSTEM_THREAD_NAME(lm, IpcServer)); - AMS_ASSERT(os::GetThreadPriority(os::GetCurrentThread()) == AMS_GET_SYSTEM_THREAD_PRIORITY(lm, IpcServer)); - - /* Clear logs directory. */ - lm::impl::ClearDebugLogs(); - - /* Add service to manager. */ - R_ABORT_UNLESS(g_server_manager.RegisterServer(LmServiceName, LmMaxSessions)); - - /* Loop forever, servicing our services. */ - lm::StartAndLoopProcess(); - - /* Cleanup */ - return 0; +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include "lm_log_service.hpp" +#include "lm_log_getter.hpp" +#include "lm_threads.hpp" + +extern "C" { + extern u32 __start__; + + u32 __nx_applet_type = AppletType_None; + u32 __nx_fs_num_sessions = 1; + + #define INNER_HEAP_SIZE 0x20000 + size_t nx_inner_heap_size = INNER_HEAP_SIZE; + char nx_inner_heap[INNER_HEAP_SIZE]; + + void __libnx_initheap(void); + void __appInit(void); + void __appExit(void); +} + +namespace ams { + + ncm::ProgramId CurrentProgramId = ncm::SystemProgramId::LogManager; + + namespace result { + + /* Fatal is launched after we are launched, so disable this. */ + bool CallFatalOnResultAssertion = false; + + } + +} + +using namespace ams; + +void __libnx_initheap(void) { + void* addr = nx_inner_heap; + size_t size = nx_inner_heap_size; + + /* Newlib */ + extern char* fake_heap_start; + extern char* fake_heap_end; + + fake_heap_start = (char*)addr; + fake_heap_end = (char*)addr + size; +} + +void __appInit(void) { + hos::InitializeForStratosphere(); + + /* Initialize services. */ + sm::DoWithSession([]() { + R_ABORT_UNLESS(setsysInitialize()); + R_ABORT_UNLESS(pscmInitialize()); + }); + + ams::CheckApiVersion(); +} + +void __appExit(void) { + /* Cleanup services. */ + pscmExit(); + setsysExit(); +} + +namespace { + + /* TODO: these domain/domain object amounts work fine, but which ones does N actually use? */ + struct ServerOptions { + static constexpr size_t PointerBufferSize = 0x400; + static constexpr size_t MaxDomains = 0x40; + static constexpr size_t MaxDomainObjects = 0x200; + }; + + constexpr sm::ServiceName LogServiceName = sm::ServiceName::Encode("lm"); + constexpr size_t LogServiceMaxSessions = 30; + + constexpr sm::ServiceName LogGetterServiceName = sm::ServiceName::Encode("lm:get"); + constexpr size_t LogGetterServiceMaxSessions = 1; + + /* lm, lm:get */ + constexpr size_t NumServers = 2; + constexpr size_t MaxSessions = LogServiceMaxSessions + LogGetterServiceMaxSessions; + sf::hipc::ServerManager g_server_manager; + + psc::PmModule g_pm_module; + os::WaitableHolderType g_pm_module_waitable_holder; + +} + +extern std::atomic_bool g_flag; + +namespace { + + void StartAndLoopProcess() { + /* Get our psc:m module to handle requests. */ + const psc::PmModuleId dependencies[] = { psc::PmModuleId_Fs, psc::PmModuleId_TmaHostIo }; + R_ABORT_UNLESS(g_pm_module.Initialize(psc::PmModuleId_Lm, dependencies, util::size(dependencies), os::EventClearMode_ManualClear)); + + os::InitializeWaitableHolder(std::addressof(g_pm_module_waitable_holder), g_pm_module.GetEventPointer()->GetBase()); + os::SetWaitableHolderUserData(std::addressof(g_pm_module_waitable_holder), static_cast(psc::PmModuleId_Lm)); + + g_server_manager.AddUserWaitableHolder(std::addressof(g_pm_module_waitable_holder)); + + psc::PmState prev_state; + psc::PmState cur_state; + psc::PmFlagSet pm_flags; + while(true) { + auto *signaled_holder = g_server_manager.WaitSignaled(); + if (signaled_holder != std::addressof(g_pm_module_waitable_holder)) { + /* Process IPC requests. */ + R_ABORT_UNLESS(g_server_manager.Process(signaled_holder)); + } + else { + /* Handle our module. 
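The psc:m event signals that a power-state transition request is pending: clear the event, read the requested state, and compare it against the previously observed one. The three transitions checked below (critical-wake to awake, awake to sleep, and any shutdown) are presumably where the g_flag atomic consulted by ShouldLogWithFlush is meant to be toggled (the code still carries a TODO for that); the request is then acknowledged so the transition can proceed.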
*/ + g_pm_module.GetEventPointer()->Clear(); + if(R_SUCCEEDED(g_pm_module.GetRequest(std::addressof(cur_state), std::addressof(pm_flags)))) { + if (((prev_state == psc::PmState_ReadyAwakenCritical) && (cur_state == psc::PmState_ReadyAwaken)) || ((prev_state == psc::PmState_ReadyAwaken) && (cur_state == psc::PmState_ReadySleep)) || (cur_state == psc::PmState_ReadyShutdown)) { + // TODO: set some atomic flag + } + R_ABORT_UNLESS(g_pm_module.Acknowledge(cur_state, ResultSuccess())); + prev_state = cur_state; + } + g_server_manager.AddUserWaitableHolder(signaled_holder); + } + } + } + +} + +int main(int argc, char **argv) { + /* Set thread name. */ + os::SetThreadNamePointer(os::GetCurrentThread(), AMS_GET_SYSTEM_THREAD_NAME(lm, IpcServer)); + AMS_ASSERT(os::GetThreadPriority(os::GetCurrentThread()) == AMS_GET_SYSTEM_THREAD_PRIORITY(lm, IpcServer)); + + /* Start threads. */ + lm::StartHtcsThread(); + lm::StartFlushThread(); + + /* Register lm */ + R_ABORT_UNLESS((g_server_manager.RegisterServer(LogServiceName, LogServiceMaxSessions))); + + /* Register lm:get */ + R_ABORT_UNLESS((g_server_manager.RegisterServer(LogGetterServiceName, LogGetterServiceMaxSessions))); + + /* Loop forever, servicing our services. */ + StartAndLoopProcess(); + + /* Dispose threads. */ + lm::DisposeFlushThread(); + lm::DisposeHtcsThread(); + + return 0; } \ No newline at end of file diff --git a/stratosphere/lm/source/lm_service.cpp b/stratosphere/lm/source/lm_service.cpp deleted file mode 100644 index a891e9041..000000000 --- a/stratosphere/lm/source/lm_service.cpp +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#include -#include "lm_service.hpp" - -namespace ams::lm { - - namespace { - - bool SaveDebugLogs() { - /* Get whether we should actually save logs. */ - u8 save_debug_logs = 0; - if (settings::fwdbg::GetSettingsItemValue(&save_debug_logs, sizeof(save_debug_logs), "atmosphere", "logmanager_save_debug_logs") != sizeof(save_debug_logs)) { - return false; - } - - return save_debug_logs != 0; - } - - } - - void Logger::Log(const sf::InAutoSelectBuffer &buf) { - /* Don't log unless we should do it. */ - if(SaveDebugLogs()) { - impl::LogPacketBuffer log_packet_buf(this->program_id, buf.GetPointer(), buf.GetSize()); - - /* Check if there's a queue already started. */ - const bool has_queued_packets = !this->queued_packets.empty(); - - if(log_packet_buf.IsHead() && log_packet_buf.IsTail()) { - /* Single packet to be logged - ensure the queue is empty, push it alone on the queue and log it. */ - this->queued_packets.clear(); - this->queued_packets.push_back(std::move(log_packet_buf)); - impl::WriteLogPackets(this->queued_packets); - } - else if(log_packet_buf.IsHead()) { - /* This is the initial packet of a queue - ensure the queue is empty and push it. 
*/ - this->queued_packets.clear(); - this->queued_packets.push_back(std::move(log_packet_buf)); - } - else if(log_packet_buf.IsTail()) { - /* This is the last packet of the queue - push it and log the queue. */ - this->queued_packets.push_back(std::move(log_packet_buf)); - impl::WriteLogPackets(this->queued_packets); - } - else if(has_queued_packets) { - /* Another packet of the queue - push it. */ - this->queued_packets.push_back(std::move(log_packet_buf)); - } - else { - /* Invalid packet - but lm must succeed on this call. */ - /* This shouldn't happen at all... */ - } - } - } - - void Logger::SetDestination(LogDestination destination) { - /* TODO: shall we make use of this value? */ - this->destination = destination; - } - - void LogService::OpenLogger(const sf::ClientProcessId &client_pid, sf::Out> out_logger) { - /* Apparently lm succeeds on many/all commands, so we will succeed on them too. */ - ncm::ProgramId program_id; - pm::info::GetProgramId(&program_id, client_pid.GetValue()); - - auto logger = std::make_shared(program_id); - out_logger.SetValue(std::move(logger)); - } - - void LogService::AtmosphereGetLogEvent(sf::OutCopyHandle out_event) { - out_event.SetValue(impl::GetLogEventHandle()); - } - - void LogService::AtmosphereGetLastLogInfo(sf::Out out_log_id, sf::Out out_program_id) { - const auto info = impl::GetLastLogInfo(); - out_log_id.SetValue(info.log_id); - out_program_id.SetValue(info.program_id); - } - -} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_service.hpp b/stratosphere/lm/source/lm_service.hpp deleted file mode 100644 index b1be5f646..000000000 --- a/stratosphere/lm/source/lm_service.hpp +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#pragma once -#include -#include "impl/lm_logging.hpp" - -namespace ams::lm { - - class Logger : public sf::IServiceObject { - - private: - enum class CommandId { - Log = 0, - SetDestination = 1, - }; - - private: - ncm::ProgramId program_id; - LogDestination destination; - std::vector queued_packets; - public: - Logger(ncm::ProgramId program_id) : program_id(program_id), destination(LogDestination::TMA), queued_packets() {} - - private: - void Log(const sf::InAutoSelectBuffer &buf); - void SetDestination(LogDestination destination); - - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Log), - MAKE_SERVICE_COMMAND_META(SetDestination, hos::Version_3_0_0), - }; - - }; - - class LogService : public sf::IServiceObject { - private: - enum class CommandId { - OpenLogger = 0, - AtmosphereGetLogEvent = 65000, - AtmosphereGetLastLogInfo = 65001, - }; - - private: - /* Official command. */ - void OpenLogger(const sf::ClientProcessId &client_pid, sf::Out> out_logger); - - /* Atmosphere commands. 
*/ - void AtmosphereGetLogEvent(sf::OutCopyHandle out_event); - void AtmosphereGetLastLogInfo(sf::Out out_log_id, sf::Out out_program_id); - - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(OpenLogger), - MAKE_SERVICE_COMMAND_META(AtmosphereGetLogEvent), - MAKE_SERVICE_COMMAND_META(AtmosphereGetLastLogInfo), - }; - - }; - -} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_threads.cpp b/stratosphere/lm/source/lm_threads.cpp new file mode 100644 index 000000000..d42fe6f24 --- /dev/null +++ b/stratosphere/lm/source/lm_threads.cpp @@ -0,0 +1,227 @@ +#include "lm_threads.hpp" +#include "impl/lm_log_server_proxy.hpp" + +std::atomic_bool g_flag; + +namespace ams::lm { + + namespace { + + void HtcsMain(void *log_server_proxy_v) { + auto log_server_proxy = reinterpret_cast(log_server_proxy_v); + /* TODO: htcs support in libstratosphere + + htcs::SockAddrHtcs addr = {}; + addr.family = HTCS_AF_HTCS; + strncpy(addr.host_name, sizeof(addr.host_name), htcs::GetDefaultHostName()); + strncpy(addr.peer_name, sizeof(addr.peer_name), "iywys@$LogManager"); + + // Handle htcs until the finalize event signals + do { + auto server_fd = htcs::Socket(); + if(R_SUCCEEDED(htcs::Bind(server_fd, &addr)) && R_SUCCEEDED(htcs::Listen(server_fd, 0))) { + while(!os::TryWaitEvent(log_server_proxy->GetFinalizeEvent())) { + auto client_fd = htcs::Accept(server_fd, 0); + log_server_proxy->CallSomeFunction(true); + + { + std::scoped_lock lk(log_server_proxy->GetSomeCondvarLock()); + os::BroadcastConditionVariable(log_server_proxy->GetSomeCondvar()); + } + + u8 dummy_val; + while(htcs::Recv(client_fd, &dummy_val, sizeof(dummy_val)) == sizeof(dummy_val)); + + htcs::Close(client_fd); + log_server_proxy->CallSomeFunction(false); + } + htcs::Close(server_fd); + } + } while(!os::TimedWaitEvent(log_server_proxy->GetFinalizeEvent(), 1'000'000'000ul)); + (); + + */ + while(true) {} + } + + os::ThreadType g_flush_thread; + alignas(os::ThreadStackAlignment) u8 g_flush_thread_stack[8_KB]; + + os::WaitableManagerType g_waitable_manager; + os::SdkMutex g_waitable_manager_lock; + bool g_flush_active = false; + + os::WaitableHolderType g_finalize_waitable_holder; + os::Event g_finalize_event(os::EventClearMode_ManualClear); + os::WaitableHolderType g_waitable_holder_1; + os::Event g_event_1(os::EventClearMode_ManualClear); + os::WaitableHolderType g_waitable_holder_2; + os::SystemEvent g_event_2; + os::WaitableHolderType g_waitable_holder_3; + os::Event g_event_3(os::EventClearMode_ManualClear); + + u8 g_fs_heap_memory[0x8000]; + lmem::HeapHandle g_fs_heap_handle; + + void *Allocate(size_t size) { + return lmem::AllocateFromExpHeap(g_fs_heap_handle, size); + } + + void Deallocate(void *ptr, size_t size) { + lmem::FreeToExpHeap(g_fs_heap_handle, ptr); + } + + void SomeALoggerFunction(bool some_flag) { + if (some_flag) { + g_event_1.Signal(); + } + else { + g_event_1.Clear(); + if (!g_event_3.TryWait()) { + /* + impl::GetCLogger()->(); + */ + } + } + } + + void SomeLogServerProxyFunction(bool some_flag) { + if (some_flag) { + g_event_3.Signal(); + } + else { + g_event_3.Clear(); + if (!g_event_1.TryWait()) { + /* + impl::GetCLogger()->(); + */ + } + } + } + + void FlushMain(void *arg) { + /* Initialize everything. 
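Disable fs auto-abort, route fs allocations through a local expansion heap, and initialize time. The flush loop then blocks on a small set of waitable holders: the finalize event, the logger and log-server-proxy notification events registered via SetSomeFunction, and a system event that appears intended for the (still commented out) SD-card event notifier.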
*/ + fs::SetEnabledAutoAbort(false); + g_fs_heap_handle = lmem::CreateExpHeap(g_fs_heap_memory, sizeof(g_fs_heap_memory), lmem::CreateOption_None); + AMS_ABORT_UNLESS(g_fs_heap_handle != nullptr); + fs::SetAllocator(Allocate, Deallocate); + time::Initialize(); + g_waitable_manager_lock.Lock(); + os::InitializeWaitableManager(std::addressof(g_waitable_manager)); + os::InitializeWaitableHolder(std::addressof(g_finalize_waitable_holder), g_finalize_event.GetBase()); + os::LinkWaitableHolder(std::addressof(g_waitable_manager), std::addressof(g_finalize_waitable_holder)); + /* R_ABORT_UNLESS(nn::fs::OpenSdCardEventNotifier(...)); */ + impl::GetALogger()->SetSomeFunction(SomeALoggerFunction); + os::InitializeWaitableHolder(std::addressof(g_waitable_holder_1), g_event_1.GetBase()); + os::LinkWaitableHolder(std::addressof(g_waitable_manager), std::addressof(g_waitable_holder_1)); + os::InitializeWaitableHolder(std::addressof(g_waitable_holder_2), g_event_2.GetBase()); + os::LinkWaitableHolder(std::addressof(g_waitable_manager), std::addressof(g_waitable_holder_2)); + impl::GetLogServerProxy()->SetSomeFunction(SomeLogServerProxyFunction); + os::InitializeWaitableHolder(std::addressof(g_waitable_holder_3), g_event_3.GetBase()); + os::LinkWaitableHolder(std::addressof(g_waitable_manager), std::addressof(g_waitable_holder_3)); + g_flush_active = true; + g_waitable_manager_lock.Unlock(); + + /* Main loop. */ + auto is_finalizing = false; + while(!is_finalizing) { + /* + if (impl::GetCLogger()->SomeMemberFn()) { + impl::GetBLogger()->SendLogPacketDropCountPacket(); + } + */ + g_waitable_manager_lock.Lock(); + auto signaled_holder = os::WaitAny(std::addressof(g_waitable_manager)); + g_waitable_manager_lock.Unlock(); + + if (signaled_holder == std::addressof(g_finalize_waitable_holder)) { + /* We're finalizing, thus break loop. */ + is_finalizing = true; + break; + } + + while(true) { + /* Wait until the holder we get is holder 1 or 3...? */ + if ((signaled_holder == std::addressof(g_waitable_holder_1)) || (signaled_holder == std::addressof(g_waitable_holder_3))) { + break; + } + + if (signaled_holder == std::addressof(g_waitable_holder_2)) { + g_event_2.Clear(); + /* + if (fs::IsSdCardInserted()) { + break; + } + */ + } + + g_waitable_manager_lock.Lock(); + signaled_holder = os::WaitAny(std::addressof(g_waitable_manager)); + g_waitable_manager_lock.Unlock(); + + if (signaled_holder == std::addressof(g_finalize_waitable_holder)) { + /* We're finalizing, thus break loop. */ + is_finalizing = true; + break; + } + } + } + + /* Finalize everything. 
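Tear-down mirrors the setup above: mark the flush loop inactive, unlink every holder from the manager, clear the logger and proxy callbacks, dispose the logger, finalize time, and destroy the fs heap, all while holding the waitable-manager lock.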
*/ + g_waitable_manager_lock.Lock(); + g_flush_active = false; + /* sub_7100022F30(std::addressof(g_waitable_manager)); */ + /* unknown_libname_142(std::addressof(g_waitable_manager)); */ + os::UnlinkWaitableHolder(std::addressof(g_finalize_waitable_holder)); + os::UnlinkWaitableHolder(std::addressof(g_waitable_holder_3)); + impl::GetLogServerProxy()->SetSomeFunction(nullptr); + os::UnlinkWaitableHolder(std::addressof(g_waitable_holder_2)); + os::UnlinkWaitableHolder(std::addressof(g_waitable_holder_1)); + impl::GetALogger()->Dispose(); + impl::GetALogger()->SetSomeFunction(nullptr); + time::Finalize(); + lmem::DestroyExpHeap(g_fs_heap_handle); + g_waitable_manager_lock.Unlock(); + } + + } + + void StartHtcsThread() { + impl::GetLogServerProxy()->StartHtcsThread(HtcsMain); + } + + void DisposeHtcsThread() { + impl::GetLogServerProxy()->DisposeHtcsThread(); + } + + void StartFlushThread() { + R_ABORT_UNLESS(os::CreateThread(std::addressof(g_flush_thread), FlushMain, nullptr, g_flush_thread_stack, sizeof(g_flush_thread_stack), AMS_GET_SYSTEM_THREAD_PRIORITY(lm, Flush))); + os::SetThreadNamePointer(std::addressof(g_flush_thread), AMS_GET_SYSTEM_THREAD_NAME(lm, Flush)); + os::StartThread(std::addressof(g_flush_thread)); + } + + void DisposeFlushThread() { + g_finalize_event.Signal(); + os::WaitThread(std::addressof(g_flush_thread)); + os::DestroyThread(std::addressof(g_flush_thread)); + } + + bool ShouldLogWithFlush() { + if (g_flag) { + return false; + } + + if (!g_waitable_manager_lock.TryLock()) { + return false; + } + + auto ret_value = false; + if (g_flush_active) { + auto signaled_holder = os::WaitAny(std::addressof(g_waitable_manager)); + ret_value = (signaled_holder != nullptr) && (signaled_holder != std::addressof(g_finalize_waitable_holder)); + } + g_waitable_manager_lock.Unlock(); + return ret_value; + } + +} \ No newline at end of file diff --git a/stratosphere/lm/source/lm_types.hpp b/stratosphere/lm/source/lm_threads.hpp similarity index 82% rename from stratosphere/lm/source/lm_types.hpp rename to stratosphere/lm/source/lm_threads.hpp index 87e5a2ceb..a7a125f4a 100644 --- a/stratosphere/lm/source/lm_types.hpp +++ b/stratosphere/lm/source/lm_threads.hpp @@ -13,17 +13,17 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ - #pragma once #include namespace ams::lm { - enum class LogDestination : u32 { - TMA = (1 << 0), - UART = (1 << 1), - UARTSleeping = (1 << 2), - All = 0xFFFF, - }; + void StartHtcsThread(); + void DisposeHtcsThread(); + + void StartFlushThread(); + void DisposeFlushThread(); + + bool ShouldLogWithFlush(); } \ No newline at end of file diff --git a/stratosphere/loader/source/ldr_anti_downgrade_tables.inc b/stratosphere/loader/source/ldr_anti_downgrade_tables.inc index 894acb128..0d57586ae 100644 --- a/stratosphere/loader/source/ldr_anti_downgrade_tables.inc +++ b/stratosphere/loader/source/ldr_anti_downgrade_tables.inc @@ -251,3 +251,76 @@ constexpr MinimumProgramVersion g_MinimumProgramVersions1000[] = { {ncm::WebAppletId::WifiWebAuth, MakeSystemVersion(10, 0, 0)}, }; constexpr size_t g_MinimumProgramVersionsCount1000 = util::size(g_MinimumProgramVersions1000); + +constexpr MinimumProgramVersion g_MinimumProgramVersions1010[] = { + /* All non-Development System Modules. 
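Each entry records the minimum version of a system module that loader will accept on firmware 10.1.0; ldr_process_creation.cpp selects this table when hos::GetVersion() reports 10.1.0 or newer, as shown further below.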
*/ + {ncm::SystemProgramId::Usb, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Tma, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Boot2, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Settings, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Bus, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Bluetooth, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Bcat, MakeSystemVersion(10, 0, 0)}, + /* {ncm::SystemProgramId::Dmnt, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::Friends, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Nifm, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Ptm, MakeSystemVersion(10, 0, 2)}, + /* {ncm::SystemProgramId::Shell, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::BsdSockets, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Hid, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Audio, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::LogManager, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Wlan, MakeSystemVersion(10, 1, 0)}, + /* {ncm::SystemProgramId::Cs, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::Ldn, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::NvServices, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Pcv, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Ppc, MakeSystemVersion( 9, 0, 0)}, + {ncm::SystemProgramId::NvnFlinger, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Pcie, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Account, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Ns, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Nfc, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Psc, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::CapSrv, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Am, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Ssl, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Nim, MakeSystemVersion(10, 1, 0)}, + /* {ncm::SystemProgramId::Cec, MakeSystemVersion(10, 0, 0)}, */ + /* {ncm::SystemProgramId::Tspm, MakeSystemVersion(10, 0, 0)}, */ + /* {ncm::SystemProgramId::Spl, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::Lbl, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Btm, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Erpt, MakeSystemVersion(10, 0, 0)}, + /* {ncm::SystemProgramId::Time, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::Vi, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Pctl, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Npns, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Eupld, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Glue, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Eclct, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Es, MakeSystemVersion(10, 1, 0)}, + {ncm::SystemProgramId::Fatal, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Grc, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Creport, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Ro, MakeSystemVersion(10, 0, 0)}, + /* {ncm::SystemProgramId::Profiler, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::Sdb, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Migration, MakeSystemVersion(10, 1, 0)}, + /* {ncm::SystemProgramId::Jit, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::JpegDec, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::SafeMode, MakeSystemVersion(10, 0, 0)}, + {ncm::SystemProgramId::Olsc, 
MakeSystemVersion(10, 1, 0)}, + /* {ncm::SystemProgramId::Dt, MakeSystemVersion(10, 0, 0)}, */ + /* {ncm::SystemProgramId::Nd, MakeSystemVersion(10, 0, 0)}, */ + {ncm::SystemProgramId::Ngct, MakeSystemVersion(10, 0, 0)}, + /* {ncm::SystemProgramId::Pgl, MakeSystemVersion(10, 0, 0)}, */ + + /* All Web Applets. */ + {ncm::WebAppletId::Web, MakeSystemVersion(10, 0, 0)}, + {ncm::WebAppletId::Shop, MakeSystemVersion(10, 0, 0)}, + {ncm::WebAppletId::OfflineWeb, MakeSystemVersion(10, 0, 0)}, + {ncm::WebAppletId::LoginShare, MakeSystemVersion(10, 0, 0)}, + {ncm::WebAppletId::WifiWebAuth, MakeSystemVersion(10, 0, 0)}, +}; +constexpr size_t g_MinimumProgramVersionsCount1010 = util::size(g_MinimumProgramVersions1010); diff --git a/stratosphere/loader/source/ldr_loader_service.hpp b/stratosphere/loader/source/ldr_loader_service.hpp index 2d5d5942a..289c384a7 100644 --- a/stratosphere/loader/source/ldr_loader_service.hpp +++ b/stratosphere/loader/source/ldr_loader_service.hpp @@ -18,101 +18,27 @@ namespace ams::ldr { - class LoaderService : public sf::IServiceObject { - protected: + class LoaderService final { + public: /* Official commands. */ - virtual Result CreateProcess(sf::OutMoveHandle proc_h, PinId id, u32 flags, sf::CopyHandle reslimit_h); - virtual Result GetProgramInfo(sf::Out out_program_info, const ncm::ProgramLocation &loc); - virtual Result PinProgram(sf::Out out_id, const ncm::ProgramLocation &loc); - virtual Result UnpinProgram(PinId id); - virtual Result SetProgramArguments(ncm::ProgramId program_id, const sf::InPointerBuffer &args, u32 args_size); - virtual Result FlushArguments(); - virtual Result GetProcessModuleInfo(sf::Out count, const sf::OutPointerArray &out, os::ProcessId process_id); - virtual Result SetEnabledProgramVerification(bool enabled); + Result CreateProcess(sf::OutMoveHandle proc_h, PinId id, u32 flags, sf::CopyHandle reslimit_h); + Result GetProgramInfo(sf::Out out_program_info, const ncm::ProgramLocation &loc); + Result PinProgram(sf::Out out_id, const ncm::ProgramLocation &loc); + Result UnpinProgram(PinId id); + Result SetProgramArguments(ncm::ProgramId program_id, const sf::InPointerBuffer &args, u32 args_size); + Result FlushArguments(); + Result GetProcessModuleInfo(sf::Out count, const sf::OutPointerArray &out, os::ProcessId process_id); + Result SetEnabledProgramVerification(bool enabled); /* Atmosphere commands. 
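These use command IDs in the 65000+ range (visible in the dispatch tables removed below) and expose atmosphère-specific functionality such as external-code registration and program/pin introspection alongside the official commands.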
*/ - virtual Result AtmosphereRegisterExternalCode(sf::OutMoveHandle out, ncm::ProgramId program_id); - virtual void AtmosphereUnregisterExternalCode(ncm::ProgramId program_id); - virtual void AtmosphereHasLaunchedProgram(sf::Out out, ncm::ProgramId program_id); - virtual Result AtmosphereGetProgramInfo(sf::Out out_program_info, sf::Out out_status, const ncm::ProgramLocation &loc); - virtual Result AtmospherePinProgram(sf::Out out_id, const ncm::ProgramLocation &loc, const cfg::OverrideStatus &override_status); + Result AtmosphereRegisterExternalCode(sf::OutMoveHandle out, ncm::ProgramId program_id); + void AtmosphereUnregisterExternalCode(ncm::ProgramId program_id); + void AtmosphereHasLaunchedProgram(sf::Out out, ncm::ProgramId program_id); + Result AtmosphereGetProgramInfo(sf::Out out_program_info, sf::Out out_status, const ncm::ProgramLocation &loc); + Result AtmospherePinProgram(sf::Out out_id, const ncm::ProgramLocation &loc, const cfg::OverrideStatus &override_status); }; - - namespace pm { - - class ProcessManagerInterface final : public LoaderService { - protected: - enum class CommandId { - CreateProcess = 0, - GetProgramInfo = 1, - PinProgram = 2, - UnpinProgram = 3, - SetEnabledProgramVerification = 4, - - AtmosphereHasLaunchedProgram = 65000, - AtmosphereGetProgramInfo = 65001, - AtmospherePinProgram = 65002, - }; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(CreateProcess), - MAKE_SERVICE_COMMAND_META(GetProgramInfo), - MAKE_SERVICE_COMMAND_META(PinProgram), - MAKE_SERVICE_COMMAND_META(UnpinProgram), - MAKE_SERVICE_COMMAND_META(SetEnabledProgramVerification, hos::Version_10_0_0), - - MAKE_SERVICE_COMMAND_META(AtmosphereHasLaunchedProgram), - MAKE_SERVICE_COMMAND_META(AtmosphereGetProgramInfo), - MAKE_SERVICE_COMMAND_META(AtmospherePinProgram), - }; - }; - - } - - namespace dmnt { - - class DebugMonitorInterface final : public LoaderService { - protected: - enum class CommandId { - SetProgramArguments = 0, - FlushArguments = 1, - GetProcessModuleInfo = 2, - - AtmosphereHasLaunchedProgram = 65000, - }; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(SetProgramArguments), - MAKE_SERVICE_COMMAND_META(FlushArguments), - MAKE_SERVICE_COMMAND_META(GetProcessModuleInfo), - - MAKE_SERVICE_COMMAND_META(AtmosphereHasLaunchedProgram), - }; - }; - - } - - namespace shell { - - class ShellInterface final : public LoaderService { - protected: - enum class CommandId { - SetProgramArguments = 0, - FlushArguments = 1, - - AtmosphereRegisterExternalCode = 65000, - AtmosphereUnregisterExternalCode = 65001, - }; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(SetProgramArguments), - MAKE_SERVICE_COMMAND_META(FlushArguments), - - MAKE_SERVICE_COMMAND_META(AtmosphereRegisterExternalCode), - MAKE_SERVICE_COMMAND_META(AtmosphereUnregisterExternalCode), - }; - }; - - } + static_assert(ams::ldr::impl::IsIProcessManagerInterface); + static_assert(ams::ldr::impl::IsIDebugMonitorInterface); + static_assert(ams::ldr::impl::IsIShellInterface); } diff --git a/stratosphere/loader/source/ldr_main.cpp b/stratosphere/loader/source/ldr_main.cpp index 5d0af575e..73de30843 100644 --- a/stratosphere/loader/source/ldr_main.cpp +++ b/stratosphere/loader/source/ldr_main.cpp @@ -115,6 +115,9 @@ namespace { int main(int argc, char **argv) { + /* Disable auto-abort in fs operations. */ + fs::SetEnabledAutoAbort(false); + /* Set thread name. 
*/ os::SetThreadNamePointer(os::GetCurrentThread(), AMS_GET_SYSTEM_THREAD_NAME(ldr, Main)); AMS_ASSERT(os::GetThreadPriority(os::GetCurrentThread()) == AMS_GET_SYSTEM_THREAD_PRIORITY(ldr, Main)); @@ -126,9 +129,9 @@ int main(int argc, char **argv) ldr::SetDevelopmentForAcidSignatureCheck(spl::IsDevelopment()); /* Add services to manager. */ - R_ABORT_UNLESS((g_server_manager.RegisterServer(ProcessManagerServiceName, ProcessManagerMaxSessions))); - R_ABORT_UNLESS((g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions))); - R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ProcessManagerServiceName, ProcessManagerMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); /* Loop forever, servicing our services. */ g_server_manager.LoopProcess(); diff --git a/stratosphere/loader/source/ldr_process_creation.cpp b/stratosphere/loader/source/ldr_process_creation.cpp index 780368634..7447af874 100644 --- a/stratosphere/loader/source/ldr_process_creation.cpp +++ b/stratosphere/loader/source/ldr_process_creation.cpp @@ -96,7 +96,10 @@ namespace ams::ldr { size_t num_entries = 0; const auto hos_version = hos::GetVersion(); - if (hos_version >= hos::Version_10_0_0) { + if (hos_version >= hos::Version_10_1_0) { + entries = g_MinimumProgramVersions1010; + num_entries = g_MinimumProgramVersionsCount1010; + } else if (hos_version >= hos::Version_10_0_0) { entries = g_MinimumProgramVersions1000; num_entries = g_MinimumProgramVersionsCount1000; } else if (hos_version >= hos::Version_9_1_0) { diff --git a/stratosphere/ncm/source/ncm_main.cpp b/stratosphere/ncm/source/ncm_main.cpp index d9ed098ea..860d3adab 100644 --- a/stratosphere/ncm/source/ncm_main.cpp +++ b/stratosphere/ncm/source/ncm_main.cpp @@ -153,24 +153,21 @@ namespace { class ContentManagerServerManager : public sf::hipc::ServerManager { private: - using ServiceType = ncm::ContentManagerImpl; + using Interface = ncm::IContentManager; + using ServiceImpl = ncm::ContentManagerImpl; private: os::ThreadType thread; - std::shared_ptr ncm_manager; + std::shared_ptr ncm_manager; private: static void ThreadFunction(void *_this) { reinterpret_cast(_this)->LoopProcess(); } public: - ContentManagerServerManager(ServiceType *m) - : ncm_manager() - { - /* ... */ - } + ContentManagerServerManager() : ncm_manager() { /* ... 
*/ } - ams::Result Initialize(std::shared_ptr manager_obj) { + ams::Result Initialize(std::shared_ptr manager_obj) { this->ncm_manager = manager_obj; - return this->RegisterServer(ContentManagerServiceName, ContentManagerManagerSessions, this->ncm_manager); + return this->RegisterServer(ContentManagerServiceName, ContentManagerManagerSessions, this->ncm_manager); } ams::Result StartThreads() { @@ -200,23 +197,20 @@ namespace { class LocationResolverServerManager : public sf::hipc::ServerManager { private: - using ServiceType = lr::LocationResolverManagerImpl; + using Interface = lr::ILocationResolverManager; + using ServiceImpl = lr::LocationResolverManagerImpl; private: os::ThreadType thread; - std::shared_ptr lr_manager; + std::shared_ptr lr_manager; private: static void ThreadFunction(void *_this) { reinterpret_cast(_this)->LoopProcess(); } public: - LocationResolverServerManager(ServiceType *m) - : lr_manager(sf::ServiceObjectTraits::SharedPointerHelper::GetEmptyDeleteSharedPointer(m)) - { - /* ... */ - } + LocationResolverServerManager(ServiceImpl &m) : lr_manager(sf::GetSharedPointerTo(m)) { /* ... */ } ams::Result Initialize() { - return this->RegisterServer(LocationResolverServiceName, LocationResolverManagerSessions, this->lr_manager); + return this->RegisterServer(LocationResolverServiceName, LocationResolverManagerSessions, this->lr_manager); } ams::Result StartThreads() { @@ -232,14 +226,10 @@ namespace { }; ncm::ContentManagerImpl g_ncm_manager_service_object; - ContentManagerServerManager g_ncm_server_manager(std::addressof(g_ncm_manager_service_object)); + ContentManagerServerManager g_ncm_server_manager; lr::LocationResolverManagerImpl g_lr_manager_service_object; - LocationResolverServerManager g_lr_server_manager(std::addressof(g_lr_manager_service_object)); - - ALWAYS_INLINE std::shared_ptr GetSharedPointerToContentManager() { - return sf::ServiceObjectTraits::SharedPointerHelper::GetEmptyDeleteSharedPointer(std::addressof(g_ncm_manager_service_object)); - } + LocationResolverServerManager g_lr_server_manager(g_lr_manager_service_object); /* Compile-time configuration. */ #ifdef NCM_BUILD_FOR_INTITIALIZE @@ -262,13 +252,16 @@ namespace { int main(int argc, char **argv) { + /* Disable auto-abort in fs operations. */ + fs::SetEnabledAutoAbort(false); + /* Set thread name. */ os::SetThreadNamePointer(os::GetCurrentThread(), AMS_GET_SYSTEM_THREAD_NAME(ncm, MainWaitThreads)); AMS_ASSERT(os::GetThreadPriority(os::GetCurrentThread()) == AMS_GET_SYSTEM_THREAD_PRIORITY(ncm, MainWaitThreads)); /* Create and initialize the content manager. */ - auto content_manager = GetSharedPointerToContentManager(); - R_ABORT_UNLESS(content_manager->Initialize(ManagerConfig)); + auto content_manager = sf::GetSharedPointerTo(g_ncm_manager_service_object); + R_ABORT_UNLESS(content_manager->GetImpl().Initialize(ManagerConfig)); /* Initialize ncm's server and start threads. 
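The content manager and the location resolver manager each run behind their own server manager with a dedicated IPC thread; here the content-manager object constructed above is registered with its server, and the matching StartThreads calls bring the IPC threads up.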
*/ R_ABORT_UNLESS(g_ncm_server_manager.Initialize(content_manager)); diff --git a/stratosphere/pgl/source/pgl_main.cpp b/stratosphere/pgl/source/pgl_main.cpp index bd56bebc1..8532d6cc0 100644 --- a/stratosphere/pgl/source/pgl_main.cpp +++ b/stratosphere/pgl/source/pgl_main.cpp @@ -66,12 +66,8 @@ namespace ams::pgl { constinit pgl::srv::ShellInterface g_shell_interface; - ALWAYS_INLINE std::shared_ptr GetSharedPointerToShellInterface() { - return ams::sf::ServiceObjectTraits::SharedPointerHelper::GetEmptyDeleteSharedPointer(std::addressof(g_shell_interface)); - } - void RegisterServiceSession() { - R_ABORT_UNLESS(g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions, GetSharedPointerToShellInterface())); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions, ams::sf::GetSharedPointerTo(g_shell_interface)))); } void LoopProcess() { @@ -145,6 +141,9 @@ void __appExit(void) { int main(int argc, char **argv) { + /* Disable auto-abort in fs operations. */ + fs::SetEnabledAutoAbort(false); + /* Set thread name. */ os::SetThreadNamePointer(os::GetCurrentThread(), AMS_GET_SYSTEM_THREAD_NAME(pgl, Main)); AMS_ASSERT(os::GetThreadPriority(os::GetCurrentThread()) == AMS_GET_SYSTEM_THREAD_PRIORITY(pgl, Main)); diff --git a/stratosphere/pm/source/pm_boot_mode_service.cpp b/stratosphere/pm/source/pm_boot_mode_service.cpp index e185ddf0a..290f05f27 100644 --- a/stratosphere/pm/source/pm_boot_mode_service.cpp +++ b/stratosphere/pm/source/pm_boot_mode_service.cpp @@ -16,7 +16,7 @@ #include #include "pm_boot_mode_service.hpp" -namespace ams::pm::bm { +namespace ams::pm { namespace { @@ -26,12 +26,16 @@ namespace ams::pm::bm { } /* Override of weakly linked boot_mode_api functions. */ - BootMode GetBootMode() { - return g_boot_mode; - } + namespace bm { + + BootMode GetBootMode() { + return g_boot_mode; + } + + void SetMaintenanceBoot() { + g_boot_mode = BootMode::Maintenance; + } - void SetMaintenanceBoot() { - g_boot_mode = BootMode::Maintenance; } /* Service command implementations. */ diff --git a/stratosphere/pm/source/pm_boot_mode_service.hpp b/stratosphere/pm/source/pm_boot_mode_service.hpp index a15f66d15..c7b6fc0b7 100644 --- a/stratosphere/pm/source/pm_boot_mode_service.hpp +++ b/stratosphere/pm/source/pm_boot_mode_service.hpp @@ -16,23 +16,13 @@ #pragma once #include -namespace ams::pm::bm { +namespace ams::pm { - class BootModeService final : public sf::IServiceObject { - private: - enum class CommandId { - GetBootMode = 0, - SetMaintenanceBoot = 1, - }; - private: - /* Actual command implementations. */ + class BootModeService final { + public: void GetBootMode(sf::Out out); void SetMaintenanceBoot(); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetBootMode), - MAKE_SERVICE_COMMAND_META(SetMaintenanceBoot), - }; }; + static_assert(pm::impl::IsIBootModeInterface); } diff --git a/stratosphere/pm/source/pm_debug_monitor_service.cpp b/stratosphere/pm/source/pm_debug_monitor_service.cpp index a42156a55..2fd98f893 100644 --- a/stratosphere/pm/source/pm_debug_monitor_service.cpp +++ b/stratosphere/pm/source/pm_debug_monitor_service.cpp @@ -17,49 +17,49 @@ #include "pm_debug_monitor_service.hpp" #include "impl/pm_process_manager.hpp" -namespace ams::pm::dmnt { +namespace ams::pm { /* Actual command implementations. 
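Each command is a thin forwarder into pm::impl; with the interface definitions (and their dispatch tables) now coming from the generated pm::impl::I*Interface traits asserted in the headers, the old virtual *ServiceBase hierarchy is dropped and these become plain member functions of DebugMonitorService.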
*/ - Result DebugMonitorServiceBase::GetModuleIdList(sf::Out out_count, const sf::OutBuffer &out_buf, u64 unused) { + Result DebugMonitorService::GetModuleIdList(sf::Out out_count, const sf::OutBuffer &out_buf, u64 unused) { R_UNLESS(out_buf.GetSize() <= std::numeric_limits::max(), pm::ResultInvalidSize()); return impl::GetModuleIdList(out_count.GetPointer(), out_buf.GetPointer(), out_buf.GetSize(), unused); } - Result DebugMonitorServiceBase::GetExceptionProcessIdList(sf::Out out_count, const sf::OutArray &out_process_ids) { + Result DebugMonitorService::GetExceptionProcessIdList(sf::Out out_count, const sf::OutArray &out_process_ids) { R_UNLESS(out_process_ids.GetSize() <= std::numeric_limits::max(), pm::ResultInvalidSize()); return impl::GetExceptionProcessIdList(out_count.GetPointer(), out_process_ids.GetPointer(), out_process_ids.GetSize()); } - Result DebugMonitorServiceBase::StartProcess(os::ProcessId process_id) { + Result DebugMonitorService::StartProcess(os::ProcessId process_id) { return impl::StartProcess(process_id); } - Result DebugMonitorServiceBase::GetProcessId(sf::Out out, ncm::ProgramId program_id) { + Result DebugMonitorService::GetProcessId(sf::Out out, ncm::ProgramId program_id) { return impl::GetProcessId(out.GetPointer(), program_id); } - Result DebugMonitorServiceBase::HookToCreateProcess(sf::OutCopyHandle out_hook, ncm::ProgramId program_id) { + Result DebugMonitorService::HookToCreateProcess(sf::OutCopyHandle out_hook, ncm::ProgramId program_id) { return impl::HookToCreateProcess(out_hook.GetHandlePointer(), program_id); } - Result DebugMonitorServiceBase::GetApplicationProcessId(sf::Out out) { + Result DebugMonitorService::GetApplicationProcessId(sf::Out out) { return impl::GetApplicationProcessId(out.GetPointer()); } - Result DebugMonitorServiceBase::HookToCreateApplicationProcess(sf::OutCopyHandle out_hook) { + Result DebugMonitorService::HookToCreateApplicationProcess(sf::OutCopyHandle out_hook) { return impl::HookToCreateApplicationProcess(out_hook.GetHandlePointer()); } - Result DebugMonitorServiceBase::ClearHook(u32 which) { + Result DebugMonitorService::ClearHook(u32 which) { return impl::ClearHook(which); } /* Atmosphere extension commands. 
*/ - Result DebugMonitorServiceBase::AtmosphereGetProcessInfo(sf::OutCopyHandle out_process_handle, sf::Out out_loc, sf::Out out_status, os::ProcessId process_id) { + Result DebugMonitorService::AtmosphereGetProcessInfo(sf::OutCopyHandle out_process_handle, sf::Out out_loc, sf::Out out_status, os::ProcessId process_id) { return impl::AtmosphereGetProcessInfo(out_process_handle.GetHandlePointer(), out_loc.GetPointer(), out_status.GetPointer(), process_id); } - Result DebugMonitorServiceBase::AtmosphereGetCurrentLimitInfo(sf::Out out_cur_val, sf::Out out_lim_val, u32 group, u32 resource) { + Result DebugMonitorService::AtmosphereGetCurrentLimitInfo(sf::Out out_cur_val, sf::Out out_lim_val, u32 group, u32 resource) { return impl::AtmosphereGetCurrentLimitInfo(out_cur_val.GetPointer(), out_lim_val.GetPointer(), group, resource); } diff --git a/stratosphere/pm/source/pm_debug_monitor_service.hpp b/stratosphere/pm/source/pm_debug_monitor_service.hpp index 396d86e20..634e61104 100644 --- a/stratosphere/pm/source/pm_debug_monitor_service.hpp +++ b/stratosphere/pm/source/pm_debug_monitor_service.hpp @@ -16,90 +16,24 @@ #pragma once #include -namespace ams::pm::dmnt { +namespace ams::pm { - class DebugMonitorServiceBase : public sf::IServiceObject { - protected: + class DebugMonitorService final { + public: /* Actual command implementations. */ - virtual Result GetModuleIdList(sf::Out out_count, const sf::OutBuffer &out_buf, u64 unused); - virtual Result GetExceptionProcessIdList(sf::Out out_count, const sf::OutArray &out_process_ids); - virtual Result StartProcess(os::ProcessId process_id); - virtual Result GetProcessId(sf::Out out, ncm::ProgramId program_id); - virtual Result HookToCreateProcess(sf::OutCopyHandle out_hook, ncm::ProgramId program_id); - virtual Result GetApplicationProcessId(sf::Out out); - virtual Result HookToCreateApplicationProcess(sf::OutCopyHandle out_hook); - virtual Result ClearHook(u32 which); + Result GetModuleIdList(sf::Out out_count, const sf::OutBuffer &out_buf, u64 unused); + Result GetExceptionProcessIdList(sf::Out out_count, const sf::OutArray &out_process_ids); + Result StartProcess(os::ProcessId process_id); + Result GetProcessId(sf::Out out, ncm::ProgramId program_id); + Result HookToCreateProcess(sf::OutCopyHandle out_hook, ncm::ProgramId program_id); + Result GetApplicationProcessId(sf::Out out); + Result HookToCreateApplicationProcess(sf::OutCopyHandle out_hook); + Result ClearHook(u32 which); /* Atmosphere extension commands. */ - virtual Result AtmosphereGetProcessInfo(sf::OutCopyHandle out_process_handle, sf::Out out_loc, sf::Out out_status, os::ProcessId process_id); - virtual Result AtmosphereGetCurrentLimitInfo(sf::Out out_cur_val, sf::Out out_lim_val, u32 group, u32 resource); - }; - - /* This represents modern DebugMonitorService (5.0.0+). 
*/ - class DebugMonitorService final : public DebugMonitorServiceBase { - private: - enum class CommandId { - GetExceptionProcessIdList = 0, - StartProcess = 1, - GetProcessId = 2, - HookToCreateProcess = 3, - GetApplicationProcessId = 4, - HookToCreateApplicationProcess = 5, - - ClearHook = 6, - - AtmosphereGetProcessInfo = 65000, - AtmosphereGetCurrentLimitInfo = 65001, - }; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - /* 5.0.0-* */ - MAKE_SERVICE_COMMAND_META(GetExceptionProcessIdList), - MAKE_SERVICE_COMMAND_META(StartProcess), - MAKE_SERVICE_COMMAND_META(GetProcessId), - MAKE_SERVICE_COMMAND_META(HookToCreateProcess), - MAKE_SERVICE_COMMAND_META(GetApplicationProcessId), - MAKE_SERVICE_COMMAND_META(HookToCreateApplicationProcess), - - /* 6.0.0-* */ - MAKE_SERVICE_COMMAND_META(ClearHook, hos::Version_6_0_0), - - /* Atmosphere extensions. */ - MAKE_SERVICE_COMMAND_META(AtmosphereGetProcessInfo), - MAKE_SERVICE_COMMAND_META(AtmosphereGetCurrentLimitInfo), - }; - }; - - /* This represents deprecated DebugMonitorService (1.0.0-4.1.0). */ - class DebugMonitorServiceDeprecated final : public DebugMonitorServiceBase { - private: - enum class CommandId { - GetModuleIdList = 0, - GetExceptionProcessIdList = 1, - StartProcess = 2, - GetProcessId = 3, - HookToCreateProcess = 4, - GetApplicationProcessId = 5, - HookToCreateApplicationProcess = 6, - - AtmosphereGetProcessInfo = 65000, - AtmosphereGetCurrentLimitInfo = 65001, - }; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - /* 1.0.0-4.1.0 */ - MAKE_SERVICE_COMMAND_META(GetModuleIdList), - MAKE_SERVICE_COMMAND_META(GetExceptionProcessIdList), - MAKE_SERVICE_COMMAND_META(StartProcess), - MAKE_SERVICE_COMMAND_META(GetProcessId), - MAKE_SERVICE_COMMAND_META(HookToCreateProcess), - MAKE_SERVICE_COMMAND_META(GetApplicationProcessId), - MAKE_SERVICE_COMMAND_META(HookToCreateApplicationProcess), - - /* Atmosphere extensions. */ - MAKE_SERVICE_COMMAND_META(AtmosphereGetProcessInfo), - MAKE_SERVICE_COMMAND_META(AtmosphereGetCurrentLimitInfo), - }; + Result AtmosphereGetProcessInfo(sf::OutCopyHandle out_process_handle, sf::Out out_loc, sf::Out out_status, os::ProcessId process_id); + Result AtmosphereGetCurrentLimitInfo(sf::Out out_cur_val, sf::Out out_lim_val, u32 group, u32 resource); }; + static_assert(pm::impl::IsIDebugMonitorInterface); } diff --git a/stratosphere/pm/source/pm_info_service.cpp b/stratosphere/pm/source/pm_info_service.cpp index d6e34e30b..5ec0f23a4 100644 --- a/stratosphere/pm/source/pm_info_service.cpp +++ b/stratosphere/pm/source/pm_info_service.cpp @@ -17,11 +17,15 @@ #include "pm_info_service.hpp" #include "impl/pm_process_manager.hpp" -namespace ams::pm::info { +namespace ams::pm { /* Overrides for libstratosphere pm::info commands. */ - Result HasLaunchedProgram(bool *out, ncm::ProgramId program_id) { - return ldr::pm::HasLaunchedProgram(out, program_id); + namespace info { + + Result HasLaunchedProgram(bool *out, ncm::ProgramId program_id) { + return ldr::pm::HasLaunchedProgram(out, program_id); + } + } /* Actual command implementations. 
*/ diff --git a/stratosphere/pm/source/pm_info_service.hpp b/stratosphere/pm/source/pm_info_service.hpp index 70043aeb8..f46639b27 100644 --- a/stratosphere/pm/source/pm_info_service.hpp +++ b/stratosphere/pm/source/pm_info_service.hpp @@ -16,18 +16,10 @@ #pragma once #include -namespace ams::pm::info { +namespace ams::pm { - class InformationService final : public sf::IServiceObject { - private: - enum class CommandId { - GetProgramId = 0, - - AtmosphereGetProcessId = 65000, - AtmosphereHasLaunchedProgram = 65001, - AtmosphereGetProcessInfo = 65002, - }; - private: + class InformationService final { + public: /* Actual command implementations. */ Result GetProgramId(sf::Out out, os::ProcessId process_id); @@ -35,14 +27,7 @@ namespace ams::pm::info { Result AtmosphereGetProcessId(sf::Out out, ncm::ProgramId program_id); Result AtmosphereHasLaunchedProgram(sf::Out out, ncm::ProgramId program_id); Result AtmosphereGetProcessInfo(sf::Out out_loc, sf::Out out_status, os::ProcessId process_id); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetProgramId), - - MAKE_SERVICE_COMMAND_META(AtmosphereGetProcessId), - MAKE_SERVICE_COMMAND_META(AtmosphereHasLaunchedProgram), - MAKE_SERVICE_COMMAND_META(AtmosphereGetProcessInfo), - }; }; + static_assert(pm::impl::IsIInformationInterface); } diff --git a/stratosphere/pm/source/pm_main.cpp b/stratosphere/pm/source/pm_main.cpp index ae7078c32..dedb3f16f 100644 --- a/stratosphere/pm/source/pm_main.cpp +++ b/stratosphere/pm/source/pm_main.cpp @@ -94,8 +94,8 @@ namespace { svc::DebugEventInfo d; while (true) { R_ABORT_UNLESS(svcGetDebugEvent(reinterpret_cast(&d), debug_handle.Get())); - if (d.type == svc::DebugEvent_AttachProcess) { - return ncm::ProgramId{d.info.attach_process.program_id}; + if (d.type == svc::DebugEvent_CreateProcess) { + return ncm::ProgramId{d.info.create_process.program_id}; } } } @@ -196,14 +196,14 @@ int main(int argc, char **argv) /* NOTE: Extra sessions have been added to pm:bm and pm:info to facilitate access by the rest of stratosphere. */ /* Also Note: PM was rewritten in 5.0.0, so the shell and dmnt services are different before/after. */ if (hos::GetVersion() >= hos::Version_5_0_0) { - R_ABORT_UNLESS((g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions))); - R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); } else { - R_ABORT_UNLESS((g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions))); - R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ShellServiceName, ShellMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); } - R_ABORT_UNLESS((g_server_manager.RegisterServer(BootModeServiceName, BootModeMaxSessions))); - R_ABORT_UNLESS((g_server_manager.RegisterServer(InformationServiceName, InformationMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(BootModeServiceName, BootModeMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(InformationServiceName, InformationMaxSessions))); /* Loop forever, servicing our services. 
*/ g_server_manager.LoopProcess(); diff --git a/stratosphere/pm/source/pm_shell_service.cpp b/stratosphere/pm/source/pm_shell_service.cpp index 939066dea..491c3169f 100644 --- a/stratosphere/pm/source/pm_shell_service.cpp +++ b/stratosphere/pm/source/pm_shell_service.cpp @@ -17,59 +17,63 @@ #include "pm_shell_service.hpp" #include "impl/pm_process_manager.hpp" -namespace ams::pm::shell { +namespace ams::pm { /* Overrides for libstratosphere pm::shell commands. */ - Result LaunchProgram(os::ProcessId *out_process_id, const ncm::ProgramLocation &loc, u32 launch_flags) { - return impl::LaunchProgram(out_process_id, loc, launch_flags); + namespace shell { + + Result LaunchProgram(os::ProcessId *out_process_id, const ncm::ProgramLocation &loc, u32 launch_flags) { + return impl::LaunchProgram(out_process_id, loc, launch_flags); + } + } /* Service command implementations. */ - Result ShellServiceBase::LaunchProgram(sf::Out out_process_id, const ncm::ProgramLocation &loc, u32 flags) { + Result ShellService::LaunchProgram(sf::Out out_process_id, const ncm::ProgramLocation &loc, u32 flags) { return pm::shell::LaunchProgram(out_process_id.GetPointer(), loc, flags); } - Result ShellServiceBase::TerminateProcess(os::ProcessId process_id) { + Result ShellService::TerminateProcess(os::ProcessId process_id) { return impl::TerminateProcess(process_id); } - Result ShellServiceBase::TerminateProgram(ncm::ProgramId program_id) { + Result ShellService::TerminateProgram(ncm::ProgramId program_id) { return impl::TerminateProgram(program_id); } - void ShellServiceBase::GetProcessEventHandle(sf::OutCopyHandle out) { + void ShellService::GetProcessEventHandle(sf::OutCopyHandle out) { R_ABORT_UNLESS(impl::GetProcessEventHandle(out.GetHandlePointer())); } - void ShellServiceBase::GetProcessEventInfo(sf::Out out) { + void ShellService::GetProcessEventInfo(sf::Out out) { R_ABORT_UNLESS(impl::GetProcessEventInfo(out.GetPointer())); } - Result ShellServiceBase::CleanupProcess(os::ProcessId process_id) { + Result ShellService::CleanupProcess(os::ProcessId process_id) { return impl::CleanupProcess(process_id); } - Result ShellServiceBase::ClearExceptionOccurred(os::ProcessId process_id) { + Result ShellService::ClearExceptionOccurred(os::ProcessId process_id) { return impl::ClearExceptionOccurred(process_id); } - void ShellServiceBase::NotifyBootFinished() { + void ShellService::NotifyBootFinished() { R_ABORT_UNLESS(impl::NotifyBootFinished()); } - Result ShellServiceBase::GetApplicationProcessIdForShell(sf::Out out) { + Result ShellService::GetApplicationProcessIdForShell(sf::Out out) { return impl::GetApplicationProcessId(out.GetPointer()); } - Result ShellServiceBase::BoostSystemMemoryResourceLimit(u64 boost_size) { + Result ShellService::BoostSystemMemoryResourceLimit(u64 boost_size) { return impl::BoostSystemMemoryResourceLimit(boost_size); } - Result ShellServiceBase::BoostApplicationThreadResourceLimit() { + Result ShellService::BoostApplicationThreadResourceLimit() { return impl::BoostApplicationThreadResourceLimit(); } - void ShellServiceBase::GetBootFinishedEventHandle(sf::OutCopyHandle out) { + void ShellService::GetBootFinishedEventHandle(sf::OutCopyHandle out) { R_ABORT_UNLESS(impl::GetBootFinishedEventHandle(out.GetHandlePointer())); } diff --git a/stratosphere/pm/source/pm_shell_service.hpp b/stratosphere/pm/source/pm_shell_service.hpp index 7c0d60c07..8794eedd4 100644 --- a/stratosphere/pm/source/pm_shell_service.hpp +++ b/stratosphere/pm/source/pm_shell_service.hpp @@ -16,91 +16,24 @@ #pragma once #include 
-namespace ams::pm::shell { +namespace ams::pm { - class ShellServiceBase : public sf::IServiceObject { - protected: + class ShellService final { + public: /* Actual command implementations. */ - virtual Result LaunchProgram(sf::Out out_process_id, const ncm::ProgramLocation &loc, u32 flags); - virtual Result TerminateProcess(os::ProcessId process_id); - virtual Result TerminateProgram(ncm::ProgramId program_id); - virtual void GetProcessEventHandle(sf::OutCopyHandle out); - virtual void GetProcessEventInfo(sf::Out out); - virtual Result CleanupProcess(os::ProcessId process_id); - virtual Result ClearExceptionOccurred(os::ProcessId process_id); - virtual void NotifyBootFinished(); - virtual Result GetApplicationProcessIdForShell(sf::Out out); - virtual Result BoostSystemMemoryResourceLimit(u64 boost_size); - virtual Result BoostApplicationThreadResourceLimit(); - virtual void GetBootFinishedEventHandle(sf::OutCopyHandle out); - }; - - /* This represents modern ShellService (5.0.0+). */ - class ShellService final : public ShellServiceBase { - private: - enum class CommandId { - LaunchProgram = 0, - TerminateProcess = 1, - TerminateProgram = 2, - GetProcessEventHandle = 3, - GetProcessEventInfo = 4, - NotifyBootFinished = 5, - GetApplicationProcessIdForShell = 6, - BoostSystemMemoryResourceLimit = 7, - BoostApplicationThreadResourceLimit = 8, - GetBootFinishedEventHandle = 9, - }; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - /* 5.0.0-* */ - MAKE_SERVICE_COMMAND_META(LaunchProgram), - MAKE_SERVICE_COMMAND_META(TerminateProcess), - MAKE_SERVICE_COMMAND_META(TerminateProgram), - MAKE_SERVICE_COMMAND_META(GetProcessEventHandle), - MAKE_SERVICE_COMMAND_META(GetProcessEventInfo), - MAKE_SERVICE_COMMAND_META(NotifyBootFinished), - MAKE_SERVICE_COMMAND_META(GetApplicationProcessIdForShell), - MAKE_SERVICE_COMMAND_META(BoostSystemMemoryResourceLimit), - - /* 7.0.0-* */ - MAKE_SERVICE_COMMAND_META(BoostApplicationThreadResourceLimit, hos::Version_7_0_0), - - /* 8.0.0-* */ - MAKE_SERVICE_COMMAND_META(GetBootFinishedEventHandle, hos::Version_8_0_0), - }; - }; - - /* This represents deprecated ShellService (1.0.0-4.1.0). 
*/ - class ShellServiceDeprecated final : public ShellServiceBase { - private: - enum class CommandId { - LaunchProgram = 0, - TerminateProcess = 1, - TerminateProgram = 2, - GetProcessEventHandle = 3, - GetProcessEventInfo = 4, - CleanupProcess = 5, - ClearExceptionOccurred = 6, - NotifyBootFinished = 7, - GetApplicationProcessIdForShell = 8, - BoostSystemMemoryResourceLimit = 9, - }; - public: - DEFINE_SERVICE_DISPATCH_TABLE { - /* 1.0.0-4.1.0 */ - MAKE_SERVICE_COMMAND_META(LaunchProgram), - MAKE_SERVICE_COMMAND_META(TerminateProcess), - MAKE_SERVICE_COMMAND_META(TerminateProgram), - MAKE_SERVICE_COMMAND_META(GetProcessEventHandle), - MAKE_SERVICE_COMMAND_META(GetProcessEventInfo), - MAKE_SERVICE_COMMAND_META(CleanupProcess), - MAKE_SERVICE_COMMAND_META(ClearExceptionOccurred), - MAKE_SERVICE_COMMAND_META(NotifyBootFinished), - MAKE_SERVICE_COMMAND_META(GetApplicationProcessIdForShell), - - /* 4.0.0-4.1.0 */ - MAKE_SERVICE_COMMAND_META(BoostSystemMemoryResourceLimit, hos::Version_4_0_0), - }; + Result LaunchProgram(sf::Out out_process_id, const ncm::ProgramLocation &loc, u32 flags); + Result TerminateProcess(os::ProcessId process_id); + Result TerminateProgram(ncm::ProgramId program_id); + void GetProcessEventHandle(sf::OutCopyHandle out); + void GetProcessEventInfo(sf::Out out); + Result CleanupProcess(os::ProcessId process_id); + Result ClearExceptionOccurred(os::ProcessId process_id); + void NotifyBootFinished(); + Result GetApplicationProcessIdForShell(sf::Out out); + Result BoostSystemMemoryResourceLimit(u64 boost_size); + Result BoostApplicationThreadResourceLimit(); + void GetBootFinishedEventHandle(sf::OutCopyHandle out); }; + static_assert(pm::impl::IsIShellInterface); } diff --git a/stratosphere/ro/source/impl/ro_service_impl.cpp b/stratosphere/ro/source/impl/ro_service_impl.cpp index 3fde492fa..5568b0f43 100644 --- a/stratosphere/ro/source/impl/ro_service_impl.cpp +++ b/stratosphere/ro/source/impl/ro_service_impl.cpp @@ -413,7 +413,7 @@ namespace ams::ro::impl { } /* Service implementations. */ - Result LoadNrr(size_t context_id, Handle process_h, u64 nrr_address, u64 nrr_size, ModuleType expected_type, bool enforce_type) { + Result RegisterModuleInfo(size_t context_id, Handle process_h, u64 nrr_address, u64 nrr_size, ModuleType expected_type, bool enforce_type) { /* Get context. */ ProcessContext *context = GetContextById(context_id); AMS_ABORT_UNLESS(context != nullptr); @@ -454,7 +454,7 @@ namespace ams::ro::impl { return ResultSuccess(); } - Result UnloadNrr(size_t context_id, u64 nrr_address) { + Result UnregisterModuleInfo(size_t context_id, u64 nrr_address) { /* Get context. */ ProcessContext *context = GetContextById(context_id); AMS_ABORT_UNLESS(context != nullptr); @@ -476,7 +476,7 @@ namespace ams::ro::impl { return UnmapNrr(context->process_handle, nrr_backup.mapped_header, nrr_backup.nrr_heap_address, nrr_backup.nrr_heap_size, nrr_backup.mapped_code_address); } - Result LoadNro(u64 *out_address, size_t context_id, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size) { + Result MapManualLoadModuleMemory(u64 *out_address, size_t context_id, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size) { /* Get context. */ ProcessContext *context = GetContextById(context_id); AMS_ABORT_UNLESS(context != nullptr); @@ -522,7 +522,7 @@ namespace ams::ro::impl { return ResultSuccess(); } - Result UnloadNro(size_t context_id, u64 nro_address) { + Result UnmapManualLoadModuleMemory(size_t context_id, u64 nro_address) { /* Get context. 
*/ ProcessContext *context = GetContextById(context_id); AMS_ABORT_UNLESS(context != nullptr); diff --git a/stratosphere/ro/source/impl/ro_service_impl.hpp b/stratosphere/ro/source/impl/ro_service_impl.hpp index 01ad09e67..a8f45456d 100644 --- a/stratosphere/ro/source/impl/ro_service_impl.hpp +++ b/stratosphere/ro/source/impl/ro_service_impl.hpp @@ -35,10 +35,10 @@ namespace ams::ro::impl { void UnregisterProcess(size_t context_id); /* Service implementations. */ - Result LoadNrr(size_t context_id, Handle process_h, u64 nrr_address, u64 nrr_size, ModuleType expected_type, bool enforce_type); - Result UnloadNrr(size_t context_id, u64 nrr_address); - Result LoadNro(u64 *out_address, size_t context_id, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size); - Result UnloadNro(size_t context_id, u64 nro_address); + Result RegisterModuleInfo(size_t context_id, Handle process_h, u64 nrr_address, u64 nrr_size, ModuleType expected_type, bool enforce_type); + Result UnregisterModuleInfo(size_t context_id, u64 nrr_address); + Result MapManualLoadModuleMemory(u64 *out_address, size_t context_id, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size); + Result UnmapManualLoadModuleMemory(size_t context_id, u64 nro_address); /* Debug service implementations. */ Result GetProcessModuleInfo(u32 *out_count, LoaderModuleInfo *out_infos, size_t max_out_count, os::ProcessId process_id); diff --git a/stratosphere/ro/source/ro_debug_monitor.cpp b/stratosphere/ro/source/ro_debug_monitor_service.cpp similarity index 96% rename from stratosphere/ro/source/ro_debug_monitor.cpp rename to stratosphere/ro/source/ro_debug_monitor_service.cpp index 2cb4aa8ee..21a105109 100644 --- a/stratosphere/ro/source/ro_debug_monitor.cpp +++ b/stratosphere/ro/source/ro_debug_monitor_service.cpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #include -#include "ro_debug_monitor.hpp" +#include "ro_debug_monitor_service.hpp" #include "impl/ro_service_impl.hpp" namespace ams::ro { diff --git a/stratosphere/ro/source/ro_debug_monitor.hpp b/stratosphere/ro/source/ro_debug_monitor_service.hpp similarity index 71% rename from stratosphere/ro/source/ro_debug_monitor.hpp rename to stratosphere/ro/source/ro_debug_monitor_service.hpp index f86eb00d6..58b634e43 100644 --- a/stratosphere/ro/source/ro_debug_monitor.hpp +++ b/stratosphere/ro/source/ro_debug_monitor_service.hpp @@ -18,18 +18,10 @@ namespace ams::ro { - class DebugMonitorService final : public sf::IServiceObject { - protected: - enum class CommandId { - GetProcessModuleInfo = 0, - }; - private: - /* Actual commands. */ - Result GetProcessModuleInfo(sf::Out out_count, const sf::OutArray &out_infos, os::ProcessId process_id); + class DebugMonitorService final { public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetProcessModuleInfo), - }; + Result GetProcessModuleInfo(sf::Out out_count, const sf::OutArray &out_infos, os::ProcessId process_id); }; + static_assert(ro::impl::IsIDebugMonitorInterface); } diff --git a/stratosphere/ro/source/ro_main.cpp b/stratosphere/ro/source/ro_main.cpp index 2a0a8034c..f3de32bb9 100644 --- a/stratosphere/ro/source/ro_main.cpp +++ b/stratosphere/ro/source/ro_main.cpp @@ -14,8 +14,8 @@ * along with this program. If not, see . */ #include -#include "ro_debug_monitor.hpp" -#include "ro_service.hpp" +#include "ro_debug_monitor_service.hpp" +#include "ro_ro_service.hpp" extern "C" { extern u32 __start__; @@ -84,10 +84,6 @@ void __appExit(void) { setsysExit(); } -/* Helpers to create RO objects. 
*/ -static constexpr auto MakeRoServiceForSelf = []() { return std::make_shared(ro::ModuleType::ForSelf); }; -static constexpr auto MakeRoServiceForOthers = []() { return std::make_shared(ro::ModuleType::ForOthers); }; - namespace { /* ldr:ro, ro:dmnt, ro:1. */ @@ -122,11 +118,11 @@ int main(int argc, char **argv) } /* Create services. */ - R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(DebugMonitorServiceName, DebugMonitorMaxSessions))); - R_ABORT_UNLESS((g_server_manager.RegisterServer(ForSelfServiceName, ForSelfMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ForSelfServiceName, ForSelfMaxSessions))); if (hos::GetVersion() >= hos::Version_7_0_0) { - R_ABORT_UNLESS((g_server_manager.RegisterServer(ForOthersServiceName, ForOthersMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ForOthersServiceName, ForOthersMaxSessions))); } /* Loop forever, servicing our services. */ diff --git a/stratosphere/ro/source/ro_ro_service.cpp b/stratosphere/ro/source/ro_ro_service.cpp new file mode 100644 index 000000000..47eb27b02 --- /dev/null +++ b/stratosphere/ro/source/ro_ro_service.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "ro_ro_service.hpp" +#include "impl/ro_service_impl.hpp" + +namespace ams::ro { + + void SetDevelopmentHardware(bool is_development_hardware) { + impl::SetDevelopmentHardware(is_development_hardware); + } + + void SetDevelopmentFunctionEnabled(bool is_development_function_enabled) { + impl::SetDevelopmentFunctionEnabled(is_development_function_enabled); + } + + RoService::RoService(ModuleType t) : context_id(impl::InvalidContextId), type(t) { + /* ... 
*/ + } + + RoService::~RoService() { + impl::UnregisterProcess(this->context_id); + } + + Result RoService::MapManualLoadModuleMemory(sf::Out load_address, const sf::ClientProcessId &client_pid, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size) { + R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); + return impl::MapManualLoadModuleMemory(load_address.GetPointer(), this->context_id, nro_address, nro_size, bss_address, bss_size); + } + + Result RoService::UnmapManualLoadModuleMemory(const sf::ClientProcessId &client_pid, u64 nro_address) { + R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); + return impl::UnmapManualLoadModuleMemory(this->context_id, nro_address); + } + + Result RoService::RegisterModuleInfo(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size) { + R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); + return impl::RegisterModuleInfo(this->context_id, svc::InvalidHandle, nrr_address, nrr_size, ModuleType::ForSelf, true); + } + + Result RoService::UnregisterModuleInfo(const sf::ClientProcessId &client_pid, u64 nrr_address) { + R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); + return impl::UnregisterModuleInfo(this->context_id, nrr_address); + } + + Result RoService::RegisterProcessHandle(const sf::ClientProcessId &client_pid, sf::CopyHandle process_h) { + return impl::RegisterProcess(std::addressof(this->context_id), process_h.GetValue(), client_pid.GetValue()); + } + + Result RoService::RegisterModuleInfoEx(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size, sf::CopyHandle process_h) { + R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); + return impl::RegisterModuleInfo(this->context_id, process_h.GetValue(), nrr_address, nrr_size, this->type, this->type == ModuleType::ForOthers); + } + +} diff --git a/stratosphere/ro/source/ro_ro_service.hpp b/stratosphere/ro/source/ro_ro_service.hpp new file mode 100644 index 000000000..c3dd6d15b --- /dev/null +++ b/stratosphere/ro/source/ro_ro_service.hpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include + +namespace ams::ro { + + /* Access utilities. */ + void SetDevelopmentHardware(bool is_development_hardware); + void SetDevelopmentFunctionEnabled(bool is_development_function_enabled); + + class RoService { + private: + size_t context_id; + ModuleType type; + protected: + explicit RoService(ModuleType t); + public: + virtual ~RoService(); + public: + /* Actual commands. 
*/ + Result MapManualLoadModuleMemory(sf::Out out_load_address, const sf::ClientProcessId &client_pid, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size); + Result UnmapManualLoadModuleMemory(const sf::ClientProcessId &client_pid, u64 nro_address); + Result RegisterModuleInfo(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size); + Result UnregisterModuleInfo(const sf::ClientProcessId &client_pid, u64 nrr_address); + Result RegisterProcessHandle(const sf::ClientProcessId &client_pid, sf::CopyHandle process_h); + Result RegisterModuleInfoEx(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size, sf::CopyHandle process_h); + }; + static_assert(ro::impl::IsIRoInterface); + + class RoServiceForSelf final : public RoService { + public: + RoServiceForSelf() : RoService(ro::ModuleType::ForSelf) { /* ... */ } + }; + + /* TODO: This is really JitPlugin... */ + class RoServiceForOthers final : public RoService { + public: + RoServiceForOthers() : RoService(ro::ModuleType::ForOthers) { /* ... */ } + }; + +} diff --git a/stratosphere/ro/source/ro_service.cpp b/stratosphere/ro/source/ro_service.cpp deleted file mode 100644 index 96d38519f..000000000 --- a/stratosphere/ro/source/ro_service.cpp +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#include -#include "ro_service.hpp" -#include "impl/ro_service_impl.hpp" - -namespace ams::ro { - - void SetDevelopmentHardware(bool is_development_hardware) { - impl::SetDevelopmentHardware(is_development_hardware); - } - - void SetDevelopmentFunctionEnabled(bool is_development_function_enabled) { - impl::SetDevelopmentFunctionEnabled(is_development_function_enabled); - } - - Service::Service(ModuleType t) : context_id(impl::InvalidContextId), type(t) { - /* ... 
*/ - } - - Service::~Service() { - impl::UnregisterProcess(this->context_id); - } - - Result Service::LoadNro(sf::Out load_address, const sf::ClientProcessId &client_pid, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size) { - R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); - return impl::LoadNro(load_address.GetPointer(), this->context_id, nro_address, nro_size, bss_address, bss_size); - } - - Result Service::UnloadNro(const sf::ClientProcessId &client_pid, u64 nro_address) { - R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); - return impl::UnloadNro(this->context_id, nro_address); - } - - Result Service::LoadNrr(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size) { - R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); - return impl::LoadNrr(this->context_id, INVALID_HANDLE, nrr_address, nrr_size, ModuleType::ForSelf, true); - } - - Result Service::UnloadNrr(const sf::ClientProcessId &client_pid, u64 nrr_address) { - R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); - return impl::UnloadNrr(this->context_id, nrr_address); - } - - Result Service::Initialize(const sf::ClientProcessId &client_pid, sf::CopyHandle process_h) { - return impl::RegisterProcess(&this->context_id, process_h.GetValue(), client_pid.GetValue()); - } - - Result Service::LoadNrrEx(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size, sf::CopyHandle process_h) { - R_TRY(impl::ValidateProcess(this->context_id, client_pid.GetValue())); - return impl::LoadNrr(this->context_id, process_h.GetValue(), nrr_address, nrr_size, this->type, this->type == ModuleType::ForOthers); - } - -} diff --git a/stratosphere/ro/source/ro_service.hpp b/stratosphere/ro/source/ro_service.hpp deleted file mode 100644 index 6cc943ef9..000000000 --- a/stratosphere/ro/source/ro_service.hpp +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#pragma once -#include - -namespace ams::ro { - - /* Access utilities. */ - void SetDevelopmentHardware(bool is_development_hardware); - void SetDevelopmentFunctionEnabled(bool is_development_function_enabled); - - class Service final : public sf::IServiceObject { - protected: - enum class CommandId { - LoadNro = 0, - UnloadNro = 1, - LoadNrr = 2, - UnloadNrr = 3, - Initialize = 4, - LoadNrrEx = 10, - }; - private: - size_t context_id; - ModuleType type; - public: - explicit Service(ModuleType t); - virtual ~Service(); - private: - /* Actual commands. 
*/ - Result LoadNro(sf::Out out_load_address, const sf::ClientProcessId &client_pid, u64 nro_address, u64 nro_size, u64 bss_address, u64 bss_size); - Result UnloadNro(const sf::ClientProcessId &client_pid, u64 nro_address); - Result LoadNrr(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size); - Result UnloadNrr(const sf::ClientProcessId &client_pid, u64 nrr_address); - Result Initialize(const sf::ClientProcessId &client_pid, sf::CopyHandle process_h); - Result LoadNrrEx(const sf::ClientProcessId &client_pid, u64 nrr_address, u64 nrr_size, sf::CopyHandle process_h); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(LoadNro), - MAKE_SERVICE_COMMAND_META(UnloadNro), - MAKE_SERVICE_COMMAND_META(LoadNrr), - MAKE_SERVICE_COMMAND_META(UnloadNrr), - MAKE_SERVICE_COMMAND_META(Initialize), - MAKE_SERVICE_COMMAND_META(LoadNrrEx, hos::Version_7_0_0), - }; - - }; - -} diff --git a/stratosphere/sm/source/impl/sm_service_manager.cpp b/stratosphere/sm/source/impl/sm_service_manager.cpp index 33812ddb3..603a1d976 100644 --- a/stratosphere/sm/source/impl/sm_service_manager.cpp +++ b/stratosphere/sm/source/impl/sm_service_manager.cpp @@ -364,8 +364,8 @@ namespace ams::sm::impl { /* Create both handles. */ { os::ManagedHandle fwd_hnd, hnd; - R_TRY(svcConnectToPort(fwd_hnd.GetPointer(), service_info->port_h.Get())); - R_TRY(svcConnectToPort(hnd.GetPointer(), service_info->mitm_port_h.Get())); + R_ABORT_UNLESS(svcConnectToPort(fwd_hnd.GetPointer(), service_info->port_h.Get())); + R_ABORT_UNLESS(svcConnectToPort(hnd.GetPointer(), service_info->mitm_port_h.Get())); service_info->mitm_fwd_sess_h = std::move(fwd_hnd); *out = hnd.Move(); } diff --git a/stratosphere/sm/source/sm_dmnt_service.cpp b/stratosphere/sm/source/sm_debug_monitor_service.cpp similarity index 73% rename from stratosphere/sm/source/sm_dmnt_service.cpp rename to stratosphere/sm/source/sm_debug_monitor_service.cpp index 7ee32f363..4622bbb4d 100644 --- a/stratosphere/sm/source/sm_dmnt_service.cpp +++ b/stratosphere/sm/source/sm_debug_monitor_service.cpp @@ -14,20 +14,20 @@ * along with this program. If not, see . */ #include -#include "sm_dmnt_service.hpp" +#include "sm_debug_monitor_service.hpp" #include "impl/sm_service_manager.hpp" namespace ams::sm { - Result DmntService::AtmosphereGetRecord(sf::Out record, ServiceName service) { + Result DebugMonitorService::AtmosphereGetRecord(sf::Out record, ServiceName service) { return impl::GetServiceRecord(record.GetPointer(), service); } - void DmntService::AtmosphereListRecords(const sf::OutArray &records, sf::Out out_count, u64 offset) { + void DebugMonitorService::AtmosphereListRecords(const sf::OutArray &records, sf::Out out_count, u64 offset) { R_ABORT_UNLESS(impl::ListServiceRecords(records.GetPointer(), out_count.GetPointer(), offset, records.GetSize())); } - void DmntService::AtmosphereGetRecordSize(sf::Out record_size) { + void DebugMonitorService::AtmosphereGetRecordSize(sf::Out record_size) { record_size.SetValue(sizeof(ServiceRecord)); } diff --git a/stratosphere/sm/source/sm_debug_monitor_service.hpp b/stratosphere/sm/source/sm_debug_monitor_service.hpp new file mode 100644 index 000000000..9b405e769 --- /dev/null +++ b/stratosphere/sm/source/sm_debug_monitor_service.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
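In the sm_service_manager.cpp hunk above, the svcConnectToPort calls used to set up a mitm session switch from R_TRY to R_ABORT_UNLESS: a failure there is no longer propagated to the caller but treated as fatal for the process. As a rough illustration of the difference between the two idioms (simplified stand-ins, not the project's real macros):

#include <cstdlib>

/* Simplified Result machinery, for illustration only. */
using Result = int;
constexpr bool R_FAILED(Result rc) { return rc != 0; }

/* Propagate the error to the caller... */
#define R_TRY_SKETCH(expr)          do { const Result rc_ = (expr); if (R_FAILED(rc_)) { return rc_;   } } while (0)
/* ...versus abort the whole process on failure. */
#define R_ABORT_UNLESS_SKETCH(expr) do { const Result rc_ = (expr); if (R_FAILED(rc_)) { std::abort(); } } while (0)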
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include + +namespace ams::sm { + + /* Service definition. */ + class DebugMonitorService final { + public: + Result AtmosphereGetRecord(sf::Out record, ServiceName service); + void AtmosphereListRecords(const sf::OutArray &records, sf::Out out_count, u64 offset); + void AtmosphereGetRecordSize(sf::Out record_size); + }; + static_assert(sm::impl::IsIDebugMonitorInterface); + +} diff --git a/stratosphere/sm/source/sm_dmnt_service.hpp b/stratosphere/sm/source/sm_dmnt_service.hpp deleted file mode 100644 index 08a1c2ac6..000000000 --- a/stratosphere/sm/source/sm_dmnt_service.hpp +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#pragma once -#include - -namespace ams::sm { - - /* Service definition. */ - class DmntService final : public sf::IServiceObject { - protected: - /* Command IDs. */ - enum class CommandId { - AtmosphereGetRecord = 65000, - AtmosphereListRecords = 65001, - AtmosphereGetRecordSize = 65002, - }; - private: - /* Actual commands. */ - virtual Result AtmosphereGetRecord(sf::Out record, ServiceName service); - virtual void AtmosphereListRecords(const sf::OutArray &records, sf::Out out_count, u64 offset); - virtual void AtmosphereGetRecordSize(sf::Out record_size); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(AtmosphereGetRecord), - MAKE_SERVICE_COMMAND_META(AtmosphereListRecords), - MAKE_SERVICE_COMMAND_META(AtmosphereGetRecordSize), - }; - }; - -} diff --git a/stratosphere/sm/source/sm_main.cpp b/stratosphere/sm/source/sm_main.cpp index eb2c56dfc..2916c8e51 100644 --- a/stratosphere/sm/source/sm_main.cpp +++ b/stratosphere/sm/source/sm_main.cpp @@ -16,7 +16,7 @@ #include #include "sm_user_service.hpp" #include "sm_manager_service.hpp" -#include "sm_dmnt_service.hpp" +#include "sm_debug_monitor_service.hpp" #include "impl/sm_service_manager.hpp" extern "C" { @@ -101,14 +101,14 @@ int main(int argc, char **argv) { Handle sm_h; R_ABORT_UNLESS(svcManageNamedPort(&sm_h, "sm:", 0x40)); - g_server_manager.RegisterServer(sm_h); + g_server_manager.RegisterServer(sm_h); } /* Create sm:m manually. 
*/ { Handle smm_h; R_ABORT_UNLESS(sm::impl::RegisterServiceForSelf(&smm_h, sm::ServiceName::Encode("sm:m"), 1)); - g_server_manager.RegisterServer(smm_h); + g_server_manager.RegisterServer(smm_h); } /*===== ATMOSPHERE EXTENSION =====*/ @@ -116,7 +116,7 @@ int main(int argc, char **argv) { Handle smdmnt_h; R_ABORT_UNLESS(sm::impl::RegisterServiceForSelf(&smdmnt_h, sm::ServiceName::Encode("sm:dmnt"), 1)); - g_server_manager.RegisterServer(smdmnt_h); + g_server_manager.RegisterServer(smdmnt_h); } /*================================*/ diff --git a/stratosphere/sm/source/sm_manager_service.hpp b/stratosphere/sm/source/sm_manager_service.hpp index 05340baa4..1906c1824 100644 --- a/stratosphere/sm/source/sm_manager_service.hpp +++ b/stratosphere/sm/source/sm_manager_service.hpp @@ -19,33 +19,14 @@ namespace ams::sm { /* Service definition. */ - class ManagerService final : public sf::IServiceObject { - protected: - /* Command IDs. */ - enum class CommandId { - RegisterProcess = 0, - UnregisterProcess = 1, - - AtmosphereEndInitDefers = 65000, - AtmosphereHasMitm = 65001, - AtmosphereRegisterProcess = 65002, - }; - private: - /* Actual commands. */ - virtual Result RegisterProcess(os::ProcessId process_id, const sf::InBuffer &acid_sac, const sf::InBuffer &aci_sac); - virtual Result UnregisterProcess(os::ProcessId process_id); - virtual void AtmosphereEndInitDefers(); - virtual void AtmosphereHasMitm(sf::Out out, ServiceName service); - virtual Result AtmosphereRegisterProcess(os::ProcessId process_id, ncm::ProgramId program_id, cfg::OverrideStatus override_status, const sf::InBuffer &acid_sac, const sf::InBuffer &aci_sac); + class ManagerService final { public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(RegisterProcess), - MAKE_SERVICE_COMMAND_META(UnregisterProcess), - - MAKE_SERVICE_COMMAND_META(AtmosphereEndInitDefers), - MAKE_SERVICE_COMMAND_META(AtmosphereHasMitm), - MAKE_SERVICE_COMMAND_META(AtmosphereRegisterProcess), - }; + Result RegisterProcess(os::ProcessId process_id, const sf::InBuffer &acid_sac, const sf::InBuffer &aci_sac); + Result UnregisterProcess(os::ProcessId process_id); + void AtmosphereEndInitDefers(); + void AtmosphereHasMitm(sf::Out out, ServiceName service); + Result AtmosphereRegisterProcess(os::ProcessId process_id, ncm::ProgramId program_id, cfg::OverrideStatus override_status, const sf::InBuffer &acid_sac, const sf::InBuffer &aci_sac); }; + static_assert(sm::impl::IsIManagerInterface); } diff --git a/stratosphere/sm/source/sm_user_service.cpp b/stratosphere/sm/source/sm_user_service.cpp index f36c661e7..67143af19 100644 --- a/stratosphere/sm/source/sm_user_service.cpp +++ b/stratosphere/sm/source/sm_user_service.cpp @@ -19,20 +19,18 @@ namespace ams::sm { - Result UserService::Initialize(const sf::ClientProcessId &client_process_id) { + Result UserService::RegisterClient(const sf::ClientProcessId &client_process_id) { this->process_id = client_process_id.GetValue(); this->has_initialized = true; return ResultSuccess(); } Result UserService::EnsureInitialized() { - if (!this->has_initialized) { - return sm::ResultInvalidClient(); - } + R_UNLESS(this->has_initialized, sm::ResultInvalidClient()); return ResultSuccess(); } - Result UserService::GetService(sf::OutMoveHandle out_h, ServiceName service) { + Result UserService::GetServiceHandle(sf::OutMoveHandle out_h, ServiceName service) { R_TRY(this->EnsureInitialized()); return impl::GetServiceHandle(out_h.GetHandlePointer(), this->process_id, service); } diff --git 
a/stratosphere/sm/source/sm_user_service.hpp b/stratosphere/sm/source/sm_user_service.hpp index 341760f8f..bc2c02f3f 100644 --- a/stratosphere/sm/source/sm_user_service.hpp +++ b/stratosphere/sm/source/sm_user_service.hpp @@ -20,26 +20,7 @@ namespace ams::sm { /* Service definition. */ - class UserService final : public sf::IServiceObject { - protected: - /* Command IDs. */ - enum class CommandId { - Initialize = 0, - GetService = 1, - RegisterService = 2, - UnregisterService = 3, - - AtmosphereInstallMitm = 65000, - AtmosphereUninstallMitm = 65001, - /* Deprecated: AtmosphereAssociatePidTidForMitm = 65002 */ - AtmosphereAcknowledgeMitmSession = 65003, - AtmosphereHasMitm = 65004, - AtmosphereWaitMitm = 65005, - AtmosphereDeclareFutureMitm = 65006, - - AtmosphereHasService = 65100, - AtmosphereWaitService = 65101, - }; + class UserService final { private: os::ProcessId process_id = os::InvalidProcessId; bool has_initialized = false; @@ -47,38 +28,22 @@ namespace ams::sm { Result EnsureInitialized(); public: /* Official commands. */ - virtual Result Initialize(const sf::ClientProcessId &client_process_id); - virtual Result GetService(sf::OutMoveHandle out_h, ServiceName service); - virtual Result RegisterService(sf::OutMoveHandle out_h, ServiceName service, u32 max_sessions, bool is_light); - virtual Result UnregisterService(ServiceName service); + Result RegisterClient(const sf::ClientProcessId &client_process_id); + Result GetServiceHandle(sf::OutMoveHandle out_h, ServiceName service); + Result RegisterService(sf::OutMoveHandle out_h, ServiceName service, u32 max_sessions, bool is_light); + Result UnregisterService(ServiceName service); /* Atmosphere commands. */ - virtual Result AtmosphereInstallMitm(sf::OutMoveHandle srv_h, sf::OutMoveHandle qry_h, ServiceName service); - virtual Result AtmosphereUninstallMitm(ServiceName service); - virtual Result AtmosphereAcknowledgeMitmSession(sf::Out client_info, sf::OutMoveHandle fwd_h, ServiceName service); - virtual Result AtmosphereHasMitm(sf::Out out, ServiceName service); - virtual Result AtmosphereWaitMitm(ServiceName service); - virtual Result AtmosphereDeclareFutureMitm(ServiceName service); + Result AtmosphereInstallMitm(sf::OutMoveHandle srv_h, sf::OutMoveHandle qry_h, ServiceName service); + Result AtmosphereUninstallMitm(ServiceName service); + Result AtmosphereAcknowledgeMitmSession(sf::Out client_info, sf::OutMoveHandle fwd_h, ServiceName service); + Result AtmosphereHasMitm(sf::Out out, ServiceName service); + Result AtmosphereWaitMitm(ServiceName service); + Result AtmosphereDeclareFutureMitm(ServiceName service); - virtual Result AtmosphereHasService(sf::Out out, ServiceName service); - virtual Result AtmosphereWaitService(ServiceName service); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(Initialize), - MAKE_SERVICE_COMMAND_META(GetService), - MAKE_SERVICE_COMMAND_META(RegisterService), - MAKE_SERVICE_COMMAND_META(UnregisterService), - - MAKE_SERVICE_COMMAND_META(AtmosphereInstallMitm), - MAKE_SERVICE_COMMAND_META(AtmosphereUninstallMitm), - MAKE_SERVICE_COMMAND_META(AtmosphereAcknowledgeMitmSession), - MAKE_SERVICE_COMMAND_META(AtmosphereHasMitm), - MAKE_SERVICE_COMMAND_META(AtmosphereWaitMitm), - MAKE_SERVICE_COMMAND_META(AtmosphereDeclareFutureMitm), - - MAKE_SERVICE_COMMAND_META(AtmosphereHasService), - MAKE_SERVICE_COMMAND_META(AtmosphereWaitService), - }; + Result AtmosphereHasService(sf::Out out, ServiceName service); + Result AtmosphereWaitService(ServiceName service); }; + 
static_assert(sm::impl::IsIUserInterface); } diff --git a/stratosphere/spl/source/spl_api_impl.cpp b/stratosphere/spl/source/spl_api_impl.cpp index f4c420070..2a5550d12 100644 --- a/stratosphere/spl/source/spl_api_impl.cpp +++ b/stratosphere/spl/source/spl_api_impl.cpp @@ -24,39 +24,39 @@ namespace ams::spl::impl { /* Convenient defines. */ constexpr size_t DeviceAddressSpaceAlign = 0x400000; - constexpr u32 WorkBufferMapBase = 0x80000000u; - constexpr u32 CryptAesInMapBase = 0x90000000u; - constexpr u32 CryptAesOutMapBase = 0xC0000000u; - constexpr size_t CryptAesSizeMax = static_cast(CryptAesOutMapBase - CryptAesInMapBase); + constexpr u32 WorkBufferMapBase = 0x80000000u; + constexpr u32 ComputeAesInMapBase = 0x90000000u; + constexpr u32 ComputeAesOutMapBase = 0xC0000000u; + constexpr size_t ComputeAesSizeMax = static_cast(ComputeAesOutMapBase - ComputeAesInMapBase); constexpr size_t RsaPrivateKeySize = 0x100; - constexpr size_t RsaPrivateKeyMetaSize = 0x30; + constexpr size_t DeviceUniqueDataMetaSize = 0x30; constexpr size_t LabelDigestSizeMax = 0x20; constexpr size_t WorkBufferSizeMax = 0x800; - constexpr s32 MaxPhysicalAesKeyslots = 6; - constexpr s32 MaxPhysicalAesKeyslotsDeprecated = 4; + constexpr s32 MaxPhysicalAesKeySlots = 6; + constexpr s32 MaxPhysicalAesKeySlotsDeprecated = 4; - constexpr s32 MaxVirtualAesKeyslots = 9; + constexpr s32 MaxVirtualAesKeySlots = 9; - /* Keyslot management. */ + /* KeySlot management. */ KeySlotCache g_keyslot_cache; - std::optional g_keyslot_cache_entry[MaxPhysicalAesKeyslots]; + std::optional g_keyslot_cache_entry[MaxPhysicalAesKeySlots]; - inline s32 GetMaxPhysicalKeyslots() { - return (hos::GetVersion() >= hos::Version_6_0_0) ? MaxPhysicalAesKeyslots : MaxPhysicalAesKeyslotsDeprecated; + inline s32 GetMaxPhysicalKeySlots() { + return (hos::GetVersion() >= hos::Version_6_0_0) ? 
MaxPhysicalAesKeySlots : MaxPhysicalAesKeySlotsDeprecated; } constexpr s32 VirtualKeySlotMin = 16; - constexpr s32 VirtualKeySlotMax = VirtualKeySlotMin + MaxVirtualAesKeyslots - 1; + constexpr s32 VirtualKeySlotMax = VirtualKeySlotMin + MaxVirtualAesKeySlots - 1; constexpr inline bool IsVirtualKeySlot(s32 keyslot) { return VirtualKeySlotMin <= keyslot && keyslot <= VirtualKeySlotMax; } inline bool IsPhysicalKeySlot(s32 keyslot) { - return keyslot < GetMaxPhysicalKeyslots(); + return keyslot < GetMaxPhysicalKeySlots(); } constexpr inline s32 GetVirtualKeySlotIndex(s32 keyslot) { @@ -71,16 +71,16 @@ namespace ams::spl::impl { } void InitializeKeySlotCache() { - for (s32 i = 0; i < MaxPhysicalAesKeyslots; i++) { + for (s32 i = 0; i < MaxPhysicalAesKeySlots; i++) { g_keyslot_cache_entry[i].emplace(i); g_keyslot_cache.AddEntry(std::addressof(g_keyslot_cache_entry[i].value())); } } enum class KeySlotContentType { - None = 0, - AesKey = 1, - TitleKey = 2, + None = 0, + AesKey = 1, + PreparedKey = 2, }; struct KeySlotContents { @@ -92,15 +92,15 @@ namespace ams::spl::impl { } aes_key; struct { AccessKey access_key; - } title_key; + } prepared_key; }; }; - const void *g_keyslot_owners[MaxVirtualAesKeyslots]; - KeySlotContents g_keyslot_contents[MaxVirtualAesKeyslots]; - KeySlotContents g_physical_keyslot_contents_for_backwards_compatibility[MaxPhysicalAesKeyslots]; + const void *g_keyslot_owners[MaxVirtualAesKeySlots]; + KeySlotContents g_keyslot_contents[MaxVirtualAesKeySlots]; + KeySlotContents g_physical_keyslot_contents_for_backwards_compatibility[MaxPhysicalAesKeySlots]; - void ClearPhysicalKeyslot(s32 keyslot) { + void ClearPhysicalKeySlot(s32 keyslot) { AMS_ASSERT(IsPhysicalKeySlot(keyslot)); AccessKey access_key = {}; @@ -139,13 +139,13 @@ namespace ams::spl::impl { if (load) { switch (contents->type) { case KeySlotContentType::None: - ClearPhysicalKeyslot(phys_slot); + ClearPhysicalKeySlot(phys_slot); break; case KeySlotContentType::AesKey: R_ABORT_UNLESS(smc::ConvertResult(smc::LoadAesKey(phys_slot, contents->aes_key.access_key, contents->aes_key.key_source))); break; - case KeySlotContentType::TitleKey: - R_ABORT_UNLESS(smc::ConvertResult(smc::LoadTitleKey(phys_slot, contents->title_key.access_key))); + case KeySlotContentType::PreparedKey: + R_ABORT_UNLESS(smc::ConvertResult(smc::LoadPreparedAesKey(phys_slot, contents->prepared_key.access_key))); break; AMS_UNREACHABLE_DEFAULT_CASE(); } @@ -169,32 +169,32 @@ namespace ams::spl::impl { return ResultSuccess(); } - Result LoadVirtualTitleKey(s32 keyslot, const AccessKey &access_key) { + Result LoadVirtualPreparedAesKey(s32 keyslot, const AccessKey &access_key) { /* Ensure we can load into the slot. */ const s32 phys_slot = GetPhysicalKeySlot(keyslot, false); - R_TRY(smc::ConvertResult(smc::LoadTitleKey(phys_slot, access_key))); + R_TRY(smc::ConvertResult(smc::LoadPreparedAesKey(phys_slot, access_key))); /* Update our contents. */ const s32 index = GetVirtualKeySlotIndex(keyslot); - g_keyslot_contents[index].type = KeySlotContentType::TitleKey; - g_keyslot_contents[index].title_key.access_key = access_key; + g_keyslot_contents[index].type = KeySlotContentType::PreparedKey; + g_keyslot_contents[index].prepared_key.access_key = access_key; return ResultSuccess(); } /* Type definitions. */ - class ScopedAesKeyslot { + class ScopedAesKeySlot { private: s32 slot; bool has_slot; public: - ScopedAesKeyslot() : slot(-1), has_slot(false) { + ScopedAesKeySlot() : slot(-1), has_slot(false) { /* ... 
*/ } - ~ScopedAesKeyslot() { + ~ScopedAesKeySlot() { if (this->has_slot) { - FreeAesKeyslot(slot, this); + DeallocateAesKeySlot(slot, this); } } @@ -203,7 +203,7 @@ namespace ams::spl::impl { } Result Allocate() { - R_TRY(AllocateAesKeyslot(&this->slot, this)); + R_TRY(AllocateAesKeySlot(&this->slot, this)); this->has_slot = true; return ResultSuccess(); } @@ -269,7 +269,7 @@ namespace ams::spl::impl { void InitializeSeEvents() { u64 irq_num; - AMS_ABORT_UNLESS(smc::GetConfig(&irq_num, 1, SplConfigItem_SecurityEngineIrqNumber) == smc::Result::Success); + AMS_ABORT_UNLESS(smc::GetConfig(&irq_num, 1, ConfigItem::SecurityEngineInterruptNumber) == smc::Result::Success); os::InitializeInterruptEvent(std::addressof(g_se_event), irq_num, os::EventClearMode_AutoClear); R_ABORT_UNLESS(os::CreateSystemEvent(std::addressof(g_se_keyslot_available_event), os::EventClearMode_AutoClear, true)); @@ -320,7 +320,7 @@ namespace ams::spl::impl { WaitSeOperationComplete(); smc::Result op_res; - smc::Result res = smc::CheckStatus(&op_res, op_key); + smc::Result res = smc::GetResult(&op_res, op_key); if (res != smc::Result::Success) { return res; } @@ -332,7 +332,7 @@ namespace ams::spl::impl { WaitSeOperationComplete(); smc::Result op_res; - smc::Result res = smc::GetResult(&op_res, out_buf, out_buf_size, op_key); + smc::Result res = smc::GetResultData(&op_res, out_buf, out_buf_size, op_key); if (res != smc::Result::Success) { return res; } @@ -340,17 +340,17 @@ namespace ams::spl::impl { return op_res; } - /* Internal Keyslot utility. */ - Result ValidateAesKeyslot(s32 keyslot, const void *owner) { + /* Internal KeySlot utility. */ + Result ValidateAesKeySlot(s32 keyslot, const void *owner) { /* Allow the use of physical keyslots on 1.0.0. */ if (hos::GetVersion() == hos::Version_1_0_0) { R_SUCCEED_IF(IsPhysicalKeySlot(keyslot)); } - R_UNLESS(IsVirtualKeySlot(keyslot), spl::ResultInvalidKeyslot()); + R_UNLESS(IsVirtualKeySlot(keyslot), spl::ResultInvalidKeySlot()); const s32 index = GetVirtualKeySlotIndex(keyslot); - R_UNLESS(g_keyslot_owners[index] == owner, spl::ResultInvalidKeyslot()); + R_UNLESS(g_keyslot_owners[index] == owner, spl::ResultInvalidKeySlot()); return ResultSuccess(); } @@ -377,11 +377,11 @@ namespace ams::spl::impl { std::scoped_lock lk(g_async_op_lock); smc::AsyncOperationKey op_key; const IvCtr iv_ctr = {}; - const u32 mode = smc::GetCryptAesMode(smc::CipherMode::CbcDecrypt, GetPhysicalKeySlot(keyslot, true)); + const u32 mode = smc::GetComputeAesMode(smc::CipherMode::CbcDecrypt, GetPhysicalKeySlot(keyslot, true)); const u32 dst_ll_addr = g_se_mapped_work_buffer_addr + offsetof(DecryptAesBlockLayout, crypt_ctx.out); const u32 src_ll_addr = g_se_mapped_work_buffer_addr + offsetof(DecryptAesBlockLayout, crypt_ctx.in); - smc::Result res = smc::CryptAes(&op_key, mode, iv_ctr, dst_ll_addr, src_ll_addr, sizeof(layout->in_block)); + smc::Result res = smc::ComputeAes(&op_key, mode, iv_ctr, dst_ll_addr, src_ll_addr, sizeof(layout->in_block)); if (res != smc::Result::Success) { return res; } @@ -397,33 +397,33 @@ namespace ams::spl::impl { } /* Implementation wrappers for API commands. 
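ScopedAesKeySlot above is the renamed RAII wrapper around the virtual keyslot pool: Allocate() reserves a slot via AllocateAesKeySlot and records the wrapper as its owner, and the destructor returns it through DeallocateAesKeySlot. GenerateAesKey later in this file uses it exactly this way; a minimal usage sketch under the same assumptions (ExampleUseOfScopedKeySlot is a stand-in name, not a function in this diff):

Result ExampleUseOfScopedKeySlot(const AccessKey &access_key, const KeySource &key_source) {
    /* Reserve a virtual AES keyslot; it is freed automatically when the holder goes out of scope. */
    ScopedAesKeySlot keyslot_holder;
    R_TRY(keyslot_holder.Allocate());

    /* Load the wrapped key into the reserved slot. */
    R_TRY(LoadVirtualAesKey(keyslot_holder.GetKeySlot(), access_key, key_source));

    /* ...perform the desired AES operation against keyslot_holder.GetKeySlot() here... */

    return ResultSuccess();
}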
*/ - Result ImportSecureExpModKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { - struct ImportSecureExpModKeyLayout { - u8 data[RsaPrivateKeyMetaSize + 2 * RsaPrivateKeySize + 0x10]; + Result DecryptAndStoreDeviceUniqueKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { + struct DecryptAndStoreDeviceUniqueKeyLayout { + u8 data[DeviceUniqueDataMetaSize + 2 * RsaPrivateKeySize + 0x10]; }; - ImportSecureExpModKeyLayout *layout = reinterpret_cast(g_work_buffer); + DecryptAndStoreDeviceUniqueKeyLayout *layout = reinterpret_cast(g_work_buffer); /* Validate size. */ - R_UNLESS(src_size <= sizeof(ImportSecureExpModKeyLayout), spl::ResultInvalidSize()); + R_UNLESS(src_size <= sizeof(DecryptAndStoreDeviceUniqueKeyLayout), spl::ResultInvalidSize()); std::memcpy(layout, src, src_size); armDCacheFlush(layout, sizeof(*layout)); smc::Result smc_res; if (hos::GetVersion() >= hos::Version_5_0_0) { - smc_res = smc::DecryptOrImportRsaPrivateKey(layout->data, src_size, access_key, key_source, static_cast(option)); + smc_res = smc::DecryptDeviceUniqueData(layout->data, src_size, access_key, key_source, static_cast(option)); } else { - smc_res = smc::ImportSecureExpModKey(layout->data, src_size, access_key, key_source, option); + smc_res = smc::DecryptAndStoreGcKey(layout->data, src_size, access_key, key_source, option); } return smc::ConvertResult(smc_res); } - Result SecureExpMod(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size, smc::SecureExpModMode mode) { - struct SecureExpModLayout { + Result ModularExponentiateWithStorageKey(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size, smc::ModularExponentiateWithStorageKeyMode mode) { + struct ModularExponentiateWithStorageKeyLayout { u8 base[0x100]; u8 mod[0x100]; }; - SecureExpModLayout *layout = reinterpret_cast(g_work_buffer); + ModularExponentiateWithStorageKeyLayout *layout = reinterpret_cast(g_work_buffer); /* Validate sizes. */ R_UNLESS(base_size <= sizeof(layout->base), spl::ResultInvalidSize()); @@ -443,7 +443,7 @@ namespace ams::spl::impl { std::scoped_lock lk(g_async_op_lock); smc::AsyncOperationKey op_key; - smc::Result res = smc::SecureExpMod(&op_key, layout->base, layout->mod, mode); + smc::Result res = smc::ModularExponentiateWithStorageKey(&op_key, layout->base, layout->mod, mode); if (res != smc::Result::Success) { return smc::ConvertResult(res); } @@ -458,12 +458,12 @@ namespace ams::spl::impl { return ResultSuccess(); } - Result UnwrapEsRsaOaepWrappedKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation, smc::EsKeyType type) { - struct UnwrapEsKeyLayout { + Result PrepareEsDeviceUniqueKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation, smc::EsCommonKeyType type) { + struct PrepareEsDeviceUniqueKeyLayout { u8 base[0x100]; u8 mod[0x100]; }; - UnwrapEsKeyLayout *layout = reinterpret_cast(g_work_buffer); + PrepareEsDeviceUniqueKeyLayout *layout = reinterpret_cast(g_work_buffer); /* Validate sizes. 
*/ R_UNLESS(base_size <= sizeof(layout->base), spl::ResultInvalidSize()); @@ -483,7 +483,7 @@ namespace ams::spl::impl { std::scoped_lock lk(g_async_op_lock); smc::AsyncOperationKey op_key; - smc::Result res = smc::UnwrapTitleKey(&op_key, layout->base, layout->mod, label_digest, label_digest_size, smc::GetUnwrapEsKeyOption(type, generation)); + smc::Result res = smc::PrepareEsDeviceUniqueKey(&op_key, layout->base, layout->mod, label_digest, label_digest_size, smc::GetPrepareEsDeviceUniqueKeyOption(type, generation)); if (res != smc::Result::Success) { return smc::ConvertResult(res); } @@ -514,39 +514,45 @@ namespace ams::spl::impl { } /* General. */ - Result GetConfig(u64 *out, SplConfigItem which) { + Result GetConfig(u64 *out, ConfigItem which) { /* Nintendo explicitly blacklists package2 hash here, amusingly. */ /* This is not blacklisted in safemode, but we're never in safe mode... */ - R_UNLESS(which != SplConfigItem_Package2Hash, spl::ResultInvalidArgument()); + R_UNLESS(which != ConfigItem::Package2Hash, spl::ResultInvalidArgument()); smc::Result res = smc::GetConfig(out, 1, which); /* Nintendo has some special handling here for hardware type/is_retail. */ - if (which == SplConfigItem_HardwareType && res == smc::Result::InvalidArgument) { - *out = 0; - res = smc::Result::Success; - } - if (which == SplConfigItem_IsRetail && res == smc::Result::InvalidArgument) { - *out = 0; - res = smc::Result::Success; + if (res == smc::Result::InvalidArgument) { + switch (which) { + case ConfigItem::HardwareType: + *out = static_cast(HardwareType::Icosa); + res = smc::Result::Success; + break; + case ConfigItem::HardwareState: + *out = HardwareState_Development; + res = smc::Result::Success; + break; + default: + break; + } } return smc::ConvertResult(res); } - Result ExpMod(void *out, size_t out_size, const void *base, size_t base_size, const void *exp, size_t exp_size, const void *mod, size_t mod_size) { - struct ExpModLayout { + Result ModularExponentiate(void *out, size_t out_size, const void *base, size_t base_size, const void *exp, size_t exp_size, const void *mod, size_t mod_size) { + struct ModularExponentiateLayout { u8 base[0x100]; u8 exp[0x100]; u8 mod[0x100]; }; - ExpModLayout *layout = reinterpret_cast(g_work_buffer); + ModularExponentiateLayout *layout = reinterpret_cast(g_work_buffer); /* Validate sizes. */ R_UNLESS(base_size <= sizeof(layout->base), spl::ResultInvalidSize()); - R_UNLESS(exp_size <= sizeof(layout->exp), spl::ResultInvalidSize()); - R_UNLESS(mod_size <= sizeof(layout->mod), spl::ResultInvalidSize()); - R_UNLESS(out_size <= WorkBufferSizeMax, spl::ResultInvalidSize()); + R_UNLESS(exp_size <= sizeof(layout->exp), spl::ResultInvalidSize()); + R_UNLESS(mod_size <= sizeof(layout->mod), spl::ResultInvalidSize()); + R_UNLESS(out_size <= WorkBufferSizeMax, spl::ResultInvalidSize()); /* Copy data into work buffer. 
*/ const size_t base_ofs = sizeof(layout->base) - base_size; @@ -562,7 +568,7 @@ namespace ams::spl::impl { std::scoped_lock lk(g_async_op_lock); smc::AsyncOperationKey op_key; - smc::Result res = smc::ExpMod(&op_key, layout->base, layout->exp, exp_size, layout->mod); + smc::Result res = smc::ModularExponentiate(&op_key, layout->base, layout->exp, exp_size, layout->mod); if (res != smc::Result::Success) { return smc::ConvertResult(res); } @@ -577,7 +583,7 @@ namespace ams::spl::impl { return ResultSuccess(); } - Result SetConfig(SplConfigItem which, u64 value) { + Result SetConfig(ConfigItem which, u64 value) { return smc::ConvertResult(smc::SetConfig(which, &value, 1)); } @@ -595,10 +601,10 @@ namespace ams::spl::impl { } Result IsDevelopment(bool *out) { - u64 is_retail; - R_TRY(GetConfig(&is_retail, SplConfigItem_IsRetail)); + u64 hardware_state; + R_TRY(impl::GetConfig(&hardware_state, ConfigItem::HardwareState)); - *out = (is_retail == 0); + *out = (hardware_state == HardwareState_Development); return ResultSuccess(); } @@ -623,7 +629,7 @@ namespace ams::spl::impl { } Result LoadAesKey(s32 keyslot, const void *owner, const AccessKey &access_key, const KeySource &key_source) { - R_TRY(ValidateAesKeyslot(keyslot, owner)); + R_TRY(ValidateAesKeySlot(keyslot, owner)); return LoadVirtualAesKey(keyslot, access_key, key_source); } @@ -632,7 +638,7 @@ namespace ams::spl::impl { .data = {0x89, 0x61, 0x5E, 0xE0, 0x5C, 0x31, 0xB6, 0x80, 0x5F, 0xE5, 0x8F, 0x3D, 0xA2, 0x4F, 0x7A, 0xA8} }; - ScopedAesKeyslot keyslot_holder; + ScopedAesKeySlot keyslot_holder; R_TRY(keyslot_holder.Allocate()); R_TRY(LoadVirtualAesKey(keyslot_holder.GetKeySlot(), access_key, s_generate_aes_key_source)); @@ -651,8 +657,8 @@ namespace ams::spl::impl { return GenerateAesKey(out_key, access_key, key_source); } - Result CryptAesCtr(void *dst, size_t dst_size, s32 keyslot, const void *owner, const void *src, size_t src_size, const IvCtr &iv_ctr) { - R_TRY(ValidateAesKeyslot(keyslot, owner)); + Result ComputeCtr(void *dst, size_t dst_size, s32 keyslot, const void *owner, const void *src, size_t src_size, const IvCtr &iv_ctr) { + R_TRY(ValidateAesKeySlot(keyslot, owner)); /* Succeed immediately if there's nothing to crypt. */ if (src_size == 0) { @@ -670,14 +676,14 @@ namespace ams::spl::impl { const uintptr_t dst_addr_page_aligned = util::AlignDown(dst_addr, os::MemoryPageSize); const size_t src_size_page_aligned = util::AlignUp(src_addr + src_size, os::MemoryPageSize) - src_addr_page_aligned; const size_t dst_size_page_aligned = util::AlignUp(dst_addr + dst_size, os::MemoryPageSize) - dst_addr_page_aligned; - const u32 src_se_map_addr = CryptAesInMapBase + (src_addr_page_aligned % DeviceAddressSpaceAlign); - const u32 dst_se_map_addr = CryptAesOutMapBase + (dst_addr_page_aligned % DeviceAddressSpaceAlign); - const u32 src_se_addr = CryptAesInMapBase + (src_addr % DeviceAddressSpaceAlign); - const u32 dst_se_addr = CryptAesOutMapBase + (dst_addr % DeviceAddressSpaceAlign); + const u32 src_se_map_addr = ComputeAesInMapBase + (src_addr_page_aligned % DeviceAddressSpaceAlign); + const u32 dst_se_map_addr = ComputeAesOutMapBase + (dst_addr_page_aligned % DeviceAddressSpaceAlign); + const u32 src_se_addr = ComputeAesInMapBase + (src_addr % DeviceAddressSpaceAlign); + const u32 dst_se_addr = ComputeAesOutMapBase + (dst_addr % DeviceAddressSpaceAlign); /* Validate aligned sizes. 
*/ - R_UNLESS(src_size_page_aligned <= CryptAesSizeMax, spl::ResultInvalidSize()); - R_UNLESS(dst_size_page_aligned <= CryptAesSizeMax, spl::ResultInvalidSize()); + R_UNLESS(src_size_page_aligned <= ComputeAesSizeMax, spl::ResultInvalidSize()); + R_UNLESS(dst_size_page_aligned <= ComputeAesSizeMax, spl::ResultInvalidSize()); /* Helpers for mapping/unmapping. */ DeviceAddressSpaceMapHelper in_mapper(g_se_das_hnd, src_se_map_addr, src_addr_page_aligned, src_size_page_aligned, 1); @@ -698,11 +704,11 @@ namespace ams::spl::impl { { std::scoped_lock lk(g_async_op_lock); smc::AsyncOperationKey op_key; - const u32 mode = smc::GetCryptAesMode(smc::CipherMode::Ctr, GetPhysicalKeySlot(keyslot, true)); + const u32 mode = smc::GetComputeAesMode(smc::CipherMode::Ctr, GetPhysicalKeySlot(keyslot, true)); const u32 dst_ll_addr = g_se_mapped_work_buffer_addr + offsetof(SeCryptContext, out); const u32 src_ll_addr = g_se_mapped_work_buffer_addr + offsetof(SeCryptContext, in); - smc::Result res = smc::CryptAes(&op_key, mode, iv_ctr, dst_ll_addr, src_ll_addr, src_size); + smc::Result res = smc::ComputeAes(&op_key, mode, iv_ctr, dst_ll_addr, src_ll_addr, src_size); if (res != smc::Result::Success) { return smc::ConvertResult(res); } @@ -717,7 +723,7 @@ namespace ams::spl::impl { } Result ComputeCmac(Cmac *out_cmac, s32 keyslot, const void *owner, const void *data, size_t size) { - R_TRY(ValidateAesKeyslot(keyslot, owner)); + R_TRY(ValidateAesKeySlot(keyslot, owner)); R_UNLESS(size <= WorkBufferSizeMax, spl::ResultInvalidSize()); @@ -725,9 +731,9 @@ namespace ams::spl::impl { return smc::ConvertResult(smc::ComputeCmac(out_cmac, GetPhysicalKeySlot(keyslot, true), g_work_buffer, size)); } - Result AllocateAesKeyslot(s32 *out_keyslot, const void *owner) { + Result AllocateAesKeySlot(s32 *out_keyslot, const void *owner) { /* Find a virtual keyslot. */ - for (s32 i = 0; i < MaxVirtualAesKeyslots; i++) { + for (s32 i = 0; i < MaxVirtualAesKeySlots; i++) { if (g_keyslot_owners[i] == nullptr) { g_keyslot_owners[i] = owner; g_keyslot_contents[i] = { .type = KeySlotContentType::None }; @@ -737,20 +743,20 @@ namespace ams::spl::impl { } os::ClearSystemEvent(std::addressof(g_se_keyslot_available_event)); - return spl::ResultOutOfKeyslots(); + return spl::ResultOutOfKeySlots(); } - Result FreeAesKeyslot(s32 keyslot, const void *owner) { + Result DeallocateAesKeySlot(s32 keyslot, const void *owner) { /* Only virtual keyslots can be freed. */ - R_UNLESS(IsVirtualKeySlot(keyslot), spl::ResultInvalidKeyslot()); + R_UNLESS(IsVirtualKeySlot(keyslot), spl::ResultInvalidKeySlot()); /* Ensure the keyslot is owned. */ - R_TRY(ValidateAesKeyslot(keyslot, owner)); + R_TRY(ValidateAesKeySlot(keyslot, owner)); /* Clear the physical keyslot, if we're cached. */ s32 phys_slot; if (g_keyslot_cache.Release(std::addressof(phys_slot), keyslot)) { - ClearPhysicalKeyslot(phys_slot); + ClearPhysicalKeySlot(phys_slot); } /* Clear the virtual keyslot. */ @@ -763,15 +769,15 @@ namespace ams::spl::impl { } /* RSA. 
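For orientation, the device-unique-data wrappers in this file (DecryptAndStoreDeviceUniqueKey above, DecryptDeviceUniqueData just below) all share one flow: stage the caller's buffer in a fixed layout placed over g_work_buffer, flush the data cache so the secure monitor observes it, issue the SMC, flush again, and copy the payload (input size minus the metadata) back out. A condensed sketch of that flow on the 5.0.0+ path, assuming the surrounding definitions behave as the hunks suggest:

Result DecryptDeviceUniqueDataSketch(void *dst, size_t dst_size, const void *src, size_t src_size,
                                     const AccessKey &access_key, const KeySource &key_source, u32 option) {
    struct Layout { u8 data[RsaPrivateKeySize + DeviceUniqueDataMetaSize]; };
    auto *layout = reinterpret_cast<Layout *>(g_work_buffer);

    /* The input must contain at least the metadata and fit inside the layout. */
    R_UNLESS(src_size >= DeviceUniqueDataMetaSize, spl::ResultInvalidSize());
    R_UNLESS(src_size <= sizeof(Layout),           spl::ResultInvalidSize());

    /* Stage the ciphertext where the secure monitor can see it. */
    std::memcpy(layout->data, src, src_size);
    armDCacheFlush(layout, sizeof(*layout));

    /* Ask the secure monitor to decrypt in place. */
    R_TRY(smc::ConvertResult(smc::DecryptDeviceUniqueData(layout->data, src_size, access_key, key_source,
                                                          static_cast<smc::DeviceUniqueDataMode>(option))));

    /* Return the decrypted payload, which is the input minus its metadata. */
    const size_t copy_size = std::min(dst_size, src_size - DeviceUniqueDataMetaSize);
    armDCacheFlush(layout, copy_size);
    std::memcpy(dst, layout->data, copy_size);
    return ResultSuccess();
}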
*/ - Result DecryptRsaPrivateKey(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { - struct DecryptRsaPrivateKeyLayout { - u8 data[RsaPrivateKeySize + RsaPrivateKeyMetaSize]; + Result DecryptDeviceUniqueData(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { + struct DecryptDeviceUniqueDataLayout { + u8 data[RsaPrivateKeySize + DeviceUniqueDataMetaSize]; }; - DecryptRsaPrivateKeyLayout *layout = reinterpret_cast(g_work_buffer); + DecryptDeviceUniqueDataLayout *layout = reinterpret_cast(g_work_buffer); /* Validate size. */ - R_UNLESS(src_size >= RsaPrivateKeyMetaSize, spl::ResultInvalidSize()); - R_UNLESS(src_size <= sizeof(DecryptRsaPrivateKeyLayout), spl::ResultInvalidSize()); + R_UNLESS(src_size >= DeviceUniqueDataMetaSize, spl::ResultInvalidSize()); + R_UNLESS(src_size <= sizeof(DecryptDeviceUniqueDataLayout), spl::ResultInvalidSize()); std::memcpy(layout->data, src, src_size); armDCacheFlush(layout, sizeof(*layout)); @@ -779,10 +785,10 @@ namespace ams::spl::impl { smc::Result smc_res; size_t copy_size = 0; if (hos::GetVersion() >= hos::Version_5_0_0) { - copy_size = std::min(dst_size, src_size - RsaPrivateKeyMetaSize); - smc_res = smc::DecryptOrImportRsaPrivateKey(layout->data, src_size, access_key, key_source, static_cast(option)); + copy_size = std::min(dst_size, src_size - DeviceUniqueDataMetaSize); + smc_res = smc::DecryptDeviceUniqueData(layout->data, src_size, access_key, key_source, static_cast(option)); } else { - smc_res = smc::DecryptRsaPrivateKey(©_size, layout->data, src_size, access_key, key_source, option); + smc_res = smc::DecryptDeviceUniqueData(©_size, layout->data, src_size, access_key, key_source, option); copy_size = std::min(dst_size, copy_size); } @@ -795,71 +801,66 @@ namespace ams::spl::impl { } /* SSL */ - Result ImportSslKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source) { - return ImportSecureExpModKey(src, src_size, access_key, key_source, static_cast(smc::DecryptOrImportMode::ImportSslKey)); + Result DecryptAndStoreSslClientCertKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source) { + return DecryptAndStoreDeviceUniqueKey(src, src_size, access_key, key_source, static_cast(smc::DeviceUniqueDataMode::DecryptAndStoreSslKey)); } - Result SslExpMod(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size) { - return SecureExpMod(out, out_size, base, base_size, mod, mod_size, smc::SecureExpModMode::Ssl); + Result ModularExponentiateWithSslClientCertKey(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size) { + return ModularExponentiateWithStorageKey(out, out_size, base, base_size, mod, mod_size, smc::ModularExponentiateWithStorageKeyMode::Ssl); } /* ES */ - Result ImportEsKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { + Result LoadEsDeviceKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { if (hos::GetVersion() >= hos::Version_5_0_0) { - return ImportSecureExpModKey(src, src_size, access_key, key_source, option); + return DecryptAndStoreDeviceUniqueKey(src, src_size, access_key, key_source, option); } else { - struct ImportEsKeyLayout { - u8 data[RsaPrivateKeyMetaSize + 2 * RsaPrivateKeySize + 0x10]; + struct 
LoadEsDeviceKeyLayout { + u8 data[DeviceUniqueDataMetaSize + 2 * RsaPrivateKeySize + 0x10]; }; - ImportEsKeyLayout *layout = reinterpret_cast(g_work_buffer); + LoadEsDeviceKeyLayout *layout = reinterpret_cast(g_work_buffer); /* Validate size. */ - R_UNLESS(src_size <= sizeof(ImportEsKeyLayout), spl::ResultInvalidSize()); + R_UNLESS(src_size <= sizeof(LoadEsDeviceKeyLayout), spl::ResultInvalidSize()); std::memcpy(layout, src, src_size); armDCacheFlush(layout, sizeof(*layout)); - return smc::ConvertResult(smc::ImportEsKey(layout->data, src_size, access_key, key_source, option)); + return smc::ConvertResult(smc::LoadEsDeviceKey(layout->data, src_size, access_key, key_source, option)); } } - Result UnwrapTitleKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation) { - return UnwrapEsRsaOaepWrappedKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, generation, smc::EsKeyType::TitleKey); + Result PrepareEsTitleKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation) { + return PrepareEsDeviceUniqueKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, generation, smc::EsCommonKeyType::TitleKey); } - Result UnwrapCommonTitleKey(AccessKey *out_access_key, const KeySource &key_source, u32 generation) { - return smc::ConvertResult(smc::UnwrapCommonTitleKey(out_access_key, key_source, generation)); + Result PrepareCommonEsTitleKey(AccessKey *out_access_key, const KeySource &key_source, u32 generation) { + return smc::ConvertResult(smc::PrepareCommonEsTitleKey(out_access_key, key_source, generation)); } - Result ImportDrmKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source) { - return ImportSecureExpModKey(src, src_size, access_key, key_source, static_cast(smc::DecryptOrImportMode::ImportDrmKey)); + Result DecryptAndStoreDrmDeviceCertKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source) { + return DecryptAndStoreDeviceUniqueKey(src, src_size, access_key, key_source, static_cast(smc::DeviceUniqueDataMode::DecryptAndStoreDrmDeviceCertKey)); } - Result DrmExpMod(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size) { - return SecureExpMod(out, out_size, base, base_size, mod, mod_size, smc::SecureExpModMode::Drm); + Result ModularExponentiateWithDrmDeviceCertKey(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size) { + return ModularExponentiateWithStorageKey(out, out_size, base, base_size, mod, mod_size, smc::ModularExponentiateWithStorageKeyMode::DrmDeviceCert); } - Result UnwrapElicenseKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation) { - return UnwrapEsRsaOaepWrappedKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, generation, smc::EsKeyType::ElicenseKey); - } - - Result LoadElicenseKey(s32 keyslot, const void *owner, const AccessKey &access_key) { - /* Right now, this is just literally the same function as LoadTitleKey in N's impl. 
*/ - return LoadTitleKey(keyslot, owner, access_key); + Result PrepareEsArchiveKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation) { + return PrepareEsDeviceUniqueKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, generation, smc::EsCommonKeyType::ArchiveKey); } /* FS */ - Result ImportLotusKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { - return ImportSecureExpModKey(src, src_size, access_key, key_source, option); + Result DecryptAndStoreGcKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) { + return DecryptAndStoreDeviceUniqueKey(src, src_size, access_key, key_source, option); } - Result DecryptLotusMessage(u32 *out_size, void *dst, size_t dst_size, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size) { + Result DecryptGcMessage(u32 *out_size, void *dst, size_t dst_size, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size) { /* Validate sizes. */ R_UNLESS(dst_size <= WorkBufferSizeMax, spl::ResultInvalidSize()); R_UNLESS(label_digest_size == LabelDigestSizeMax, spl::ResultInvalidSize()); /* Nintendo doesn't check this result code, but we will. */ - R_TRY(SecureExpMod(g_work_buffer, 0x100, base, base_size, mod, mod_size, smc::SecureExpModMode::Lotus)); + R_TRY(ModularExponentiateWithStorageKey(g_work_buffer, 0x100, base, base_size, mod, mod_size, smc::ModularExponentiateWithStorageKeyMode::Gc)); size_t data_size = crypto::DecodeRsa2048OaepSha256(dst, dst_size, label_digest, label_digest_size, g_work_buffer, 0x100); R_UNLESS(data_size > 0, spl::ResultDecryptionFailed()); @@ -872,9 +873,9 @@ namespace ams::spl::impl { return smc::ConvertResult(smc::GenerateSpecificAesKey(out_key, key_source, generation, which)); } - Result LoadTitleKey(s32 keyslot, const void *owner, const AccessKey &access_key) { - R_TRY(ValidateAesKeyslot(keyslot, owner)); - return LoadVirtualTitleKey(keyslot, access_key); + Result LoadPreparedAesKey(s32 keyslot, const void *owner, const AccessKey &access_key) { + R_TRY(ValidateAesKeySlot(keyslot, owner)); + return LoadVirtualPreparedAesKey(keyslot, access_key); } Result GetPackage2Hash(void *dst, const size_t size) { @@ -882,7 +883,7 @@ namespace ams::spl::impl { R_UNLESS(size >= sizeof(hash), spl::ResultInvalidSize()); smc::Result smc_res; - if ((smc_res = smc::GetConfig(hash, 4, SplConfigItem_Package2Hash)) != smc::Result::Success) { + if ((smc_res = smc::GetConfig(hash, 4, ConfigItem::Package2Hash)) != smc::Result::Success) { return smc::ConvertResult(smc_res); } @@ -891,19 +892,19 @@ namespace ams::spl::impl { } /* Manu. 
*/ - Result ReEncryptRsaPrivateKey(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option) { - struct ReEncryptRsaPrivateKeyLayout { - u8 data[RsaPrivateKeyMetaSize + 2 * RsaPrivateKeySize + 0x10]; + Result ReencryptDeviceUniqueData(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option) { + struct ReencryptDeviceUniqueDataLayout { + u8 data[DeviceUniqueDataMetaSize + 2 * RsaPrivateKeySize + 0x10]; AccessKey access_key_dec; KeySource source_dec; AccessKey access_key_enc; KeySource source_enc; }; - ReEncryptRsaPrivateKeyLayout *layout = reinterpret_cast<ReEncryptRsaPrivateKeyLayout *>(g_work_buffer); + ReencryptDeviceUniqueDataLayout *layout = reinterpret_cast<ReencryptDeviceUniqueDataLayout *>(g_work_buffer); /* Validate size. */ - R_UNLESS(src_size >= RsaPrivateKeyMetaSize, spl::ResultInvalidSize()); - R_UNLESS(src_size <= sizeof(ReEncryptRsaPrivateKeyLayout), spl::ResultInvalidSize()); + R_UNLESS(src_size >= DeviceUniqueDataMetaSize, spl::ResultInvalidSize()); + R_UNLESS(src_size <= sizeof(ReencryptDeviceUniqueDataLayout), spl::ResultInvalidSize()); std::memcpy(layout, src, src_size); layout->access_key_dec = access_key_dec; @@ -913,7 +914,7 @@ namespace ams::spl::impl { armDCacheFlush(layout, sizeof(*layout)); - smc::Result smc_res = smc::ReEncryptRsaPrivateKey(layout->data, src_size, layout->access_key_dec, layout->source_dec, layout->access_key_enc, layout->source_enc, option); + smc::Result smc_res = smc::ReencryptDeviceUniqueData(layout->data, src_size, layout->access_key_dec, layout->source_dec, layout->access_key_enc, layout->source_enc, option); if (smc_res == smc::Result::Success) { size_t copy_size = std::min(dst_size, src_size); armDCacheFlush(layout, copy_size); @@ -924,16 +925,16 @@ } /* Helper. */ - Result FreeAesKeyslots(const void *owner) { + Result DeallocateAllAesKeySlots(const void *owner) { for (s32 slot = VirtualKeySlotMin; slot <= VirtualKeySlotMax; ++slot) { if (g_keyslot_owners[GetVirtualKeySlotIndex(slot)] == owner) { - FreeAesKeyslot(slot, owner); + DeallocateAesKeySlot(slot, owner); } } return ResultSuccess(); } - Handle GetAesKeyslotAvailableEventHandle() { + Handle GetAesKeySlotAvailableEventHandle() { return os::GetReadableHandleOfSystemEvent(std::addressof(g_se_keyslot_available_event)); } diff --git a/stratosphere/spl/source/spl_api_impl.hpp b/stratosphere/spl/source/spl_api_impl.hpp index 483f29e53..f41129035 100644 --- a/stratosphere/spl/source/spl_api_impl.hpp +++ b/stratosphere/spl/source/spl_api_impl.hpp @@ -22,9 +22,9 @@ namespace ams::spl::impl { void Initialize(); /* General.
*/ - Result GetConfig(u64 *out, SplConfigItem which); - Result ExpMod(void *out, size_t out_size, const void *base, size_t base_size, const void *exp, size_t exp_size, const void *mod, size_t mod_size); - Result SetConfig(SplConfigItem which, u64 value); + Result GetConfig(u64 *out, spl::ConfigItem which); + Result ModularExponentiate(void *out, size_t out_size, const void *base, size_t base_size, const void *exp, size_t exp_size, const void *mod, size_t mod_size); + Result SetConfig(spl::ConfigItem which, u64 value); Result GenerateRandomBytes(void *out, size_t size); Result IsDevelopment(bool *out); Result SetBootReason(BootReasonValue boot_reason); @@ -35,39 +35,39 @@ namespace ams::spl::impl { Result LoadAesKey(s32 keyslot, const void *owner, const AccessKey &access_key, const KeySource &key_source); Result GenerateAesKey(AesKey *out_key, const AccessKey &access_key, const KeySource &key_source); Result DecryptAesKey(AesKey *out_key, const KeySource &key_source, u32 generation, u32 option); - Result CryptAesCtr(void *dst, size_t dst_size, s32 keyslot, const void *owner, const void *src, size_t src_size, const IvCtr &iv_ctr); + Result ComputeCtr(void *dst, size_t dst_size, s32 keyslot, const void *owner, const void *src, size_t src_size, const IvCtr &iv_ctr); Result ComputeCmac(Cmac *out_cmac, s32 keyslot, const void *owner, const void *data, size_t size); - Result AllocateAesKeyslot(s32 *out_keyslot, const void *owner); - Result FreeAesKeyslot(s32 keyslot, const void *owner); + Result AllocateAesKeySlot(s32 *out_keyslot, const void *owner); + Result DeallocateAesKeySlot(s32 keyslot, const void *owner); /* RSA. */ - Result DecryptRsaPrivateKey(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option); + Result DecryptDeviceUniqueData(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option); /* SSL */ - Result ImportSslKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source); - Result SslExpMod(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size); + Result DecryptAndStoreSslClientCertKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source); + Result ModularExponentiateWithSslClientCertKey(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size); /* ES */ - Result ImportEsKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option); - Result UnwrapTitleKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation); - Result UnwrapCommonTitleKey(AccessKey *out_access_key, const KeySource &key_source, u32 generation); - Result ImportDrmKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source); - Result DrmExpMod(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size); - Result UnwrapElicenseKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation); - Result LoadElicenseKey(s32 keyslot, const void *owner, const AccessKey &access_key); + Result LoadEsDeviceKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource 
&key_source, u32 option); + Result PrepareEsTitleKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation); + Result PrepareCommonEsTitleKey(AccessKey *out_access_key, const KeySource &key_source, u32 generation); + Result DecryptAndStoreDrmDeviceCertKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source); + Result ModularExponentiateWithDrmDeviceCertKey(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size); + Result PrepareEsArchiveKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation); + Result LoadPreparedAesKey(s32 keyslot, const void *owner, const AccessKey &access_key); /* FS */ - Result ImportLotusKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option); - Result DecryptLotusMessage(u32 *out_size, void *dst, size_t dst_size, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size); + Result DecryptAndStoreGcKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option); + Result DecryptGcMessage(u32 *out_size, void *dst, size_t dst_size, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size); Result GenerateSpecificAesKey(AesKey *out_key, const KeySource &key_source, u32 generation, u32 which); - Result LoadTitleKey(s32 keyslot, const void *owner, const AccessKey &access_key); + Result LoadPreparedAesKey(s32 keyslot, const void *owner, const AccessKey &access_key); Result GetPackage2Hash(void *dst, const size_t size); /* Manu. */ - Result ReEncryptRsaPrivateKey(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option); + Result ReencryptDeviceUniqueData(void *dst, size_t dst_size, const void *src, size_t src_size, const AccessKey &access_key_dec, const KeySource &source_dec, const AccessKey &access_key_enc, const KeySource &source_enc, u32 option); /* Helper. */ - Result FreeAesKeyslots(const void *owner); - Handle GetAesKeyslotAvailableEventHandle(); + Result DeallocateAllAesKeySlots(const void *owner); + Handle GetAesKeySlotAvailableEventHandle(); } diff --git a/stratosphere/spl/source/spl_crypto_service.cpp b/stratosphere/spl/source/spl_crypto_service.cpp index 63b150a43..b559e6e87 100644 --- a/stratosphere/spl/source/spl_crypto_service.cpp +++ b/stratosphere/spl/source/spl_crypto_service.cpp @@ -21,7 +21,7 @@ namespace ams::spl { CryptoService::~CryptoService() { /* Free any keyslots this service is using. 
*/ - impl::FreeAesKeyslots(this); + impl::DeallocateAllAesKeySlots(this); } Result CryptoService::GenerateAesKek(sf::Out<AccessKey> out_access_key, KeySource key_source, u32 generation, u32 option) { @@ -40,24 +40,25 @@ namespace ams::spl { return impl::DecryptAesKey(out_key.GetPointer(), key_source, generation, option); } - Result CryptoService::CryptAesCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr) { - return impl::CryptAesCtr(out_buf.GetPointer(), out_buf.GetSize(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize(), iv_ctr); + Result CryptoService::ComputeCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr) { + return impl::ComputeCtr(out_buf.GetPointer(), out_buf.GetSize(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize(), iv_ctr); } Result CryptoService::ComputeCmac(sf::Out<Cmac> out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf) { return impl::ComputeCmac(out_cmac.GetPointer(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize()); } - Result CryptoService::AllocateAesKeyslot(sf::Out<s32> out_keyslot) { - return impl::AllocateAesKeyslot(out_keyslot.GetPointer(), this); + Result CryptoService::AllocateAesKeySlot(sf::Out<s32> out_keyslot) { + return impl::AllocateAesKeySlot(out_keyslot.GetPointer(), this); } - Result CryptoService::FreeAesKeyslot(s32 keyslot) { - return impl::FreeAesKeyslot(keyslot, this); + Result CryptoService::DeallocateAesKeySlot(s32 keyslot) { + return impl::DeallocateAesKeySlot(keyslot, this); } - void CryptoService::GetAesKeyslotAvailableEvent(sf::OutCopyHandle out_hnd) { - out_hnd.SetValue(impl::GetAesKeyslotAvailableEventHandle()); + Result CryptoService::GetAesKeySlotAvailableEvent(sf::OutCopyHandle out_hnd) { + out_hnd.SetValue(impl::GetAesKeySlotAvailableEventHandle()); + return ResultSuccess(); } } diff --git a/stratosphere/spl/source/spl_crypto_service.hpp b/stratosphere/spl/source/spl_crypto_service.hpp index 8eec93dd8..a6facc9cc 100644 --- a/stratosphere/spl/source/spl_crypto_service.hpp +++ b/stratosphere/spl/source/spl_crypto_service.hpp @@ -20,38 +20,19 @@ namespace ams::spl { class CryptoService : public GeneralService { public: - CryptoService() : GeneralService() { /* ... */ } virtual ~CryptoService(); - protected: - /* Actual commands.
*/ - virtual Result GenerateAesKek(sf::Out out_access_key, KeySource key_source, u32 generation, u32 option); - virtual Result LoadAesKey(s32 keyslot, AccessKey access_key, KeySource key_source); - virtual Result GenerateAesKey(sf::Out out_key, AccessKey access_key, KeySource key_source); - virtual Result DecryptAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 option); - virtual Result CryptAesCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr); - virtual Result ComputeCmac(sf::Out out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf); - virtual Result AllocateAesKeyslot(sf::Out out_keyslot); - virtual Result FreeAesKeyslot(s32 keyslot); - virtual void GetAesKeyslotAvailableEvent(sf::OutCopyHandle out_hnd); public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetConfig), - MAKE_SERVICE_COMMAND_META(ExpMod), - MAKE_SERVICE_COMMAND_META(SetConfig), - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - MAKE_SERVICE_COMMAND_META(IsDevelopment), - MAKE_SERVICE_COMMAND_META(SetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GenerateAesKek), - MAKE_SERVICE_COMMAND_META(LoadAesKey), - MAKE_SERVICE_COMMAND_META(GenerateAesKey), - MAKE_SERVICE_COMMAND_META(DecryptAesKey), - MAKE_SERVICE_COMMAND_META(CryptAesCtr), - MAKE_SERVICE_COMMAND_META(ComputeCmac), - MAKE_SERVICE_COMMAND_META(AllocateAesKeyslot /* Atmosphere extension: This was added in hos::Version_2_0_0, but is allowed on older firmware by atmosphere. */), - MAKE_SERVICE_COMMAND_META(FreeAesKeyslot /* Atmosphere extension: This was added in hos::Version_2_0_0, but is allowed on older firmware by atmosphere. */), - MAKE_SERVICE_COMMAND_META(GetAesKeyslotAvailableEvent /* Atmosphere extension: This was added in hos::Version_2_0_0, but is allowed on older firmware by atmosphere. */), - }; + /* Actual commands. */ + Result GenerateAesKek(sf::Out out_access_key, KeySource key_source, u32 generation, u32 option); + Result LoadAesKey(s32 keyslot, AccessKey access_key, KeySource key_source); + Result GenerateAesKey(sf::Out out_key, AccessKey access_key, KeySource key_source); + Result DecryptAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 option); + Result ComputeCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr); + Result ComputeCmac(sf::Out out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf); + Result AllocateAesKeySlot(sf::Out out_keyslot); + Result DeallocateAesKeySlot(s32 keyslot); + Result GetAesKeySlotAvailableEvent(sf::OutCopyHandle out_hnd); }; + static_assert(spl::impl::IsICryptoInterface); } diff --git a/stratosphere/spl/source/spl_deprecated_service.cpp b/stratosphere/spl/source/spl_deprecated_service.cpp index f6c4ec8c8..358a557a8 100644 --- a/stratosphere/spl/source/spl_deprecated_service.cpp +++ b/stratosphere/spl/source/spl_deprecated_service.cpp @@ -19,12 +19,17 @@ namespace ams::spl { - Result DeprecatedService::GetConfig(sf::Out out, u32 which) { - return impl::GetConfig(out.GetPointer(), static_cast(which)); + DeprecatedService::~DeprecatedService() { + /* Free any keyslots this service is using. 
*/ + impl::DeallocateAllAesKeySlots(this); } - Result DeprecatedService::ExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod) { - return impl::ExpMod(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), exp.GetPointer(), exp.GetSize(), mod.GetPointer(), mod.GetSize()); + Result DeprecatedService::GetConfig(sf::Out out, u32 which) { + return impl::GetConfig(out.GetPointer(), static_cast(which)); + } + + Result DeprecatedService::ModularExponentiate(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod) { + return impl::ModularExponentiate(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), exp.GetPointer(), exp.GetSize(), mod.GetPointer(), mod.GetSize()); } Result DeprecatedService::GenerateAesKek(sf::Out out_access_key, KeySource key_source, u32 generation, u32 option) { @@ -40,19 +45,19 @@ namespace ams::spl { } Result DeprecatedService::SetConfig(u32 which, u64 value) { - return impl::SetConfig(static_cast(which), value); + return impl::SetConfig(static_cast(which), value); } Result DeprecatedService::GenerateRandomBytes(const sf::OutPointerBuffer &out) { return impl::GenerateRandomBytes(out.GetPointer(), out.GetSize()); } - Result DeprecatedService::ImportLotusKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { - return impl::ImportLotusKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); + Result DeprecatedService::DecryptAndStoreGcKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { + return impl::DecryptAndStoreGcKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); } - Result DeprecatedService::DecryptLotusMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest) { - return impl::DecryptLotusMessage(out_size.GetPointer(), out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize()); + Result DeprecatedService::DecryptGcMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest) { + return impl::DecryptGcMessage(out_size.GetPointer(), out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize()); } Result DeprecatedService::IsDevelopment(sf::Out is_dev) { @@ -63,60 +68,61 @@ namespace ams::spl { return impl::GenerateSpecificAesKey(out_key.GetPointer(), key_source, generation, which); } - Result DeprecatedService::DecryptRsaPrivateKey(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { - return impl::DecryptRsaPrivateKey(dst.GetPointer(), dst.GetSize(), src.GetPointer(), src.GetSize(), access_key, key_source, option); + Result DeprecatedService::DecryptDeviceUniqueData(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { + return impl::DecryptDeviceUniqueData(dst.GetPointer(), dst.GetSize(), src.GetPointer(), src.GetSize(), access_key, key_source, option); } Result DeprecatedService::DecryptAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 
option) { return impl::DecryptAesKey(out_key.GetPointer(), key_source, generation, option); } - Result DeprecatedService::CryptAesCtrDeprecated(const sf::OutBuffer &out_buf, s32 keyslot, const sf::InBuffer &in_buf, IvCtr iv_ctr) { - return impl::CryptAesCtr(out_buf.GetPointer(), out_buf.GetSize(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize(), iv_ctr); + Result DeprecatedService::ComputeCtrDeprecated(const sf::OutBuffer &out_buf, s32 keyslot, const sf::InBuffer &in_buf, IvCtr iv_ctr) { + return impl::ComputeCtr(out_buf.GetPointer(), out_buf.GetSize(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize(), iv_ctr); } - Result DeprecatedService::CryptAesCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr) { - return impl::CryptAesCtr(out_buf.GetPointer(), out_buf.GetSize(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize(), iv_ctr); + Result DeprecatedService::ComputeCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr) { + return impl::ComputeCtr(out_buf.GetPointer(), out_buf.GetSize(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize(), iv_ctr); } Result DeprecatedService::ComputeCmac(sf::Out out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf) { return impl::ComputeCmac(out_cmac.GetPointer(), keyslot, this, in_buf.GetPointer(), in_buf.GetSize()); } - Result DeprecatedService::ImportEsKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { - return impl::ImportEsKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); + Result DeprecatedService::LoadEsDeviceKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { + return impl::LoadEsDeviceKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); } - Result DeprecatedService::UnwrapTitleKeyDeprecated(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest) { - return impl::UnwrapTitleKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), 0); + Result DeprecatedService::PrepareEsTitleKeyDeprecated(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest) { + return impl::PrepareEsTitleKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), 0); } - Result DeprecatedService::UnwrapTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation) { - return impl::UnwrapTitleKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), generation); + Result DeprecatedService::PrepareEsTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation) { + return impl::PrepareEsTitleKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), generation); } - Result DeprecatedService::LoadTitleKey(s32 keyslot, AccessKey access_key) { - return impl::LoadTitleKey(keyslot, this, access_key); + Result DeprecatedService::LoadPreparedAesKey(s32 
keyslot, AccessKey access_key) { + return impl::LoadPreparedAesKey(keyslot, this, access_key); } - Result DeprecatedService::UnwrapCommonTitleKeyDeprecated(sf::Out out_access_key, KeySource key_source) { - return impl::UnwrapCommonTitleKey(out_access_key.GetPointer(), key_source, 0); + Result DeprecatedService::PrepareCommonEsTitleKeyDeprecated(sf::Out out_access_key, KeySource key_source) { + return impl::PrepareCommonEsTitleKey(out_access_key.GetPointer(), key_source, 0); } - Result DeprecatedService::UnwrapCommonTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation) { - return impl::UnwrapCommonTitleKey(out_access_key.GetPointer(), key_source, generation); + Result DeprecatedService::PrepareCommonEsTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation) { + return impl::PrepareCommonEsTitleKey(out_access_key.GetPointer(), key_source, generation); } - Result DeprecatedService::AllocateAesKeyslot(sf::Out out_keyslot) { - return impl::AllocateAesKeyslot(out_keyslot.GetPointer(), this); + Result DeprecatedService::AllocateAesKeySlot(sf::Out out_keyslot) { + return impl::AllocateAesKeySlot(out_keyslot.GetPointer(), this); } - Result DeprecatedService::FreeAesKeyslot(s32 keyslot) { - return impl::FreeAesKeyslot(keyslot, this); + Result DeprecatedService::DeallocateAesKeySlot(s32 keyslot) { + return impl::DeallocateAesKeySlot(keyslot, this); } - void DeprecatedService::GetAesKeyslotAvailableEvent(sf::OutCopyHandle out_hnd) { - out_hnd.SetValue(impl::GetAesKeyslotAvailableEventHandle()); + Result DeprecatedService::GetAesKeySlotAvailableEvent(sf::OutCopyHandle out_hnd) { + out_hnd.SetValue(impl::GetAesKeySlotAvailableEventHandle()); + return ResultSuccess(); } Result DeprecatedService::SetBootReason(BootReasonValue boot_reason) { diff --git a/stratosphere/spl/source/spl_deprecated_service.hpp b/stratosphere/spl/source/spl_deprecated_service.hpp index 11d49a7d9..b7c03291b 100644 --- a/stratosphere/spl/source/spl_deprecated_service.hpp +++ b/stratosphere/spl/source/spl_deprecated_service.hpp @@ -18,112 +18,39 @@ namespace ams::spl { - class DeprecatedService : public sf::IServiceObject { - protected: - enum class CommandId { - /* 1.0.0+ */ - GetConfig = 0, - ExpMod = 1, - GenerateAesKek = 2, - LoadAesKey = 3, - GenerateAesKey = 4, - SetConfig = 5, - GenerateRandomBytes = 7, - ImportLotusKey = 9, - DecryptLotusMessage = 10, - IsDevelopment = 11, - GenerateSpecificAesKey = 12, - DecryptRsaPrivateKeyDeprecated = 13, - DecryptRsaPrivateKey = 13, - DecryptAesKey = 14, - CryptAesCtrDeprecated = 15, - CryptAesCtr = 15, - ComputeCmac = 16, - ImportEsKey = 17, - UnwrapTitleKeyDeprecated = 18, - UnwrapTitleKey = 18, - LoadTitleKey = 19, - - /* 2.0.0+ */ - UnwrapCommonTitleKeyDeprecated = 20, - UnwrapCommonTitleKey = 20, - AllocateAesKeyslot = 21, - FreeAesKeyslot = 22, - GetAesKeyslotAvailableEvent = 23, - - /* 3.0.0+ */ - SetBootReason = 24, - GetBootReason = 25, - }; + class DeprecatedService final { + public: + virtual ~DeprecatedService(); public: - DeprecatedService() { /* ... */ } - virtual ~DeprecatedService() { /* ... */ } - protected: /* Actual commands. 
*/ - virtual Result GetConfig(sf::Out out, u32 which); - virtual Result ExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod); - virtual Result GenerateAesKek(sf::Out out_access_key, KeySource key_source, u32 generation, u32 option); - virtual Result LoadAesKey(s32 keyslot, AccessKey access_key, KeySource key_source); - virtual Result GenerateAesKey(sf::Out out_key, AccessKey access_key, KeySource key_source); - virtual Result SetConfig(u32 which, u64 value); - virtual Result GenerateRandomBytes(const sf::OutPointerBuffer &out); - virtual Result ImportLotusKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); - virtual Result DecryptLotusMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest); - virtual Result IsDevelopment(sf::Out is_dev); - virtual Result GenerateSpecificAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 which); - virtual Result DecryptRsaPrivateKey(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); - virtual Result DecryptAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 option); - virtual Result CryptAesCtrDeprecated(const sf::OutBuffer &out_buf, s32 keyslot, const sf::InBuffer &in_buf, IvCtr iv_ctr); - virtual Result CryptAesCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr); - virtual Result ComputeCmac(sf::Out out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf); - virtual Result ImportEsKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); - virtual Result UnwrapTitleKeyDeprecated(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest); - virtual Result UnwrapTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation); - virtual Result LoadTitleKey(s32 keyslot, AccessKey access_key); - virtual Result UnwrapCommonTitleKeyDeprecated(sf::Out out_access_key, KeySource key_source); - virtual Result UnwrapCommonTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation); - virtual Result AllocateAesKeyslot(sf::Out out_keyslot); - virtual Result FreeAesKeyslot(s32 keyslot); - virtual void GetAesKeyslotAvailableEvent(sf::OutCopyHandle out_hnd); - virtual Result SetBootReason(BootReasonValue boot_reason); - virtual Result GetBootReason(sf::Out out); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetConfig), - MAKE_SERVICE_COMMAND_META(ExpMod), - MAKE_SERVICE_COMMAND_META(GenerateAesKek), - MAKE_SERVICE_COMMAND_META(LoadAesKey), - MAKE_SERVICE_COMMAND_META(GenerateAesKey), - MAKE_SERVICE_COMMAND_META(SetConfig), - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - MAKE_SERVICE_COMMAND_META(ImportLotusKey), - MAKE_SERVICE_COMMAND_META(DecryptLotusMessage), - MAKE_SERVICE_COMMAND_META(IsDevelopment), - MAKE_SERVICE_COMMAND_META(GenerateSpecificAesKey), - MAKE_SERVICE_COMMAND_META(DecryptRsaPrivateKey), - MAKE_SERVICE_COMMAND_META(DecryptAesKey), - - MAKE_SERVICE_COMMAND_META(CryptAesCtrDeprecated, hos::Version_1_0_0, hos::Version_1_0_0), - MAKE_SERVICE_COMMAND_META(CryptAesCtr, hos::Version_2_0_0), - - MAKE_SERVICE_COMMAND_META(ComputeCmac), - 
MAKE_SERVICE_COMMAND_META(ImportEsKey), - - MAKE_SERVICE_COMMAND_META(UnwrapTitleKeyDeprecated, hos::Version_1_0_0, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(UnwrapTitleKey, hos::Version_3_0_0), - - MAKE_SERVICE_COMMAND_META(LoadTitleKey), - - MAKE_SERVICE_COMMAND_META(UnwrapCommonTitleKeyDeprecated, hos::Version_2_0_0, hos::Version_2_3_0), - MAKE_SERVICE_COMMAND_META(UnwrapCommonTitleKey, hos::Version_3_0_0), - - MAKE_SERVICE_COMMAND_META(AllocateAesKeyslot /* Atmosphere extension: This was added in hos::Version_2_0_0, but is allowed on older firmware by atmosphere. */), - MAKE_SERVICE_COMMAND_META(FreeAesKeyslot /* Atmosphere extension: This was added in hos::Version_2_0_0, but is allowed on older firmware by atmosphere. */), - MAKE_SERVICE_COMMAND_META(GetAesKeyslotAvailableEvent /* Atmosphere extension: This was added in hos::Version_2_0_0, but is allowed on older firmware by atmosphere. */), - - MAKE_SERVICE_COMMAND_META(SetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetBootReason, hos::Version_3_0_0), - }; + Result GetConfig(sf::Out out, u32 which); + Result ModularExponentiate(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod); + Result GenerateAesKek(sf::Out out_access_key, KeySource key_source, u32 generation, u32 option); + Result LoadAesKey(s32 keyslot, AccessKey access_key, KeySource key_source); + Result GenerateAesKey(sf::Out out_key, AccessKey access_key, KeySource key_source); + Result SetConfig(u32 which, u64 value); + Result GenerateRandomBytes(const sf::OutPointerBuffer &out); + Result DecryptAndStoreGcKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); + Result DecryptGcMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest); + Result IsDevelopment(sf::Out is_dev); + Result GenerateSpecificAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 which); + Result DecryptDeviceUniqueData(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); + Result DecryptAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 option); + Result ComputeCtrDeprecated(const sf::OutBuffer &out_buf, s32 keyslot, const sf::InBuffer &in_buf, IvCtr iv_ctr); + Result ComputeCtr(const sf::OutNonSecureBuffer &out_buf, s32 keyslot, const sf::InNonSecureBuffer &in_buf, IvCtr iv_ctr); + Result ComputeCmac(sf::Out out_cmac, s32 keyslot, const sf::InPointerBuffer &in_buf); + Result LoadEsDeviceKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); + Result PrepareEsTitleKeyDeprecated(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest); + Result PrepareEsTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation); + Result LoadPreparedAesKey(s32 keyslot, AccessKey access_key); + Result PrepareCommonEsTitleKeyDeprecated(sf::Out out_access_key, KeySource key_source); + Result PrepareCommonEsTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation); + Result AllocateAesKeySlot(sf::Out out_keyslot); + Result DeallocateAesKeySlot(s32 keyslot); + Result GetAesKeySlotAvailableEvent(sf::OutCopyHandle out_hnd); + Result SetBootReason(BootReasonValue 
boot_reason); + Result GetBootReason(sf::Out out); }; + static_assert(spl::impl::IsIDeprecatedGeneralInterface); } diff --git a/stratosphere/spl/source/spl_device_unique_data_service.cpp b/stratosphere/spl/source/spl_device_unique_data_service.cpp new file mode 100644 index 000000000..4dd281fd6 --- /dev/null +++ b/stratosphere/spl/source/spl_device_unique_data_service.cpp @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include "spl_api_impl.hpp" +#include "spl_device_unique_data_service.hpp" + +namespace ams::spl { + + Result DeviceUniqueDataService::DecryptDeviceUniqueDataDeprecated(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { + return impl::DecryptDeviceUniqueData(dst.GetPointer(), dst.GetSize(), src.GetPointer(), src.GetSize(), access_key, key_source, option); + } + + Result DeviceUniqueDataService::DecryptDeviceUniqueData(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { + return impl::DecryptDeviceUniqueData(dst.GetPointer(), dst.GetSize(), src.GetPointer(), src.GetSize(), access_key, key_source, static_cast(smc::DeviceUniqueDataMode::DecryptDeviceUniqueData)); + } + +} diff --git a/stratosphere/spl/source/spl_rsa_service.hpp b/stratosphere/spl/source/spl_device_unique_data_service.hpp similarity index 60% rename from stratosphere/spl/source/spl_rsa_service.hpp rename to stratosphere/spl/source/spl_device_unique_data_service.hpp index e7c38b6fd..397a271f0 100644 --- a/stratosphere/spl/source/spl_rsa_service.hpp +++ b/stratosphere/spl/source/spl_device_unique_data_service.hpp @@ -18,14 +18,12 @@ namespace ams::spl { - class RsaService : public CryptoService { + class DeviceUniqueDataService : public CryptoService { public: - RsaService() : CryptoService() { /* ... */ } - virtual ~RsaService() { /* ... */ } - protected: /* Actual commands. 
*/ - virtual Result DecryptRsaPrivateKeyDeprecated(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); - virtual Result DecryptRsaPrivateKey(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); + Result DecryptDeviceUniqueDataDeprecated(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); + Result DecryptDeviceUniqueData(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); }; + static_assert(spl::impl::IsIDeviceUniqueDataInterface); } diff --git a/stratosphere/spl/source/spl_es_service.cpp b/stratosphere/spl/source/spl_es_service.cpp index 9f1a9cffc..969212835 100644 --- a/stratosphere/spl/source/spl_es_service.cpp +++ b/stratosphere/spl/source/spl_es_service.cpp @@ -19,36 +19,36 @@ namespace ams::spl { - Result EsService::ImportEsKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { - return impl::ImportEsKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); + Result EsService::LoadEsDeviceKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { + return impl::LoadEsDeviceKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); } - Result EsService::ImportEsKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { - return impl::ImportEsKey(src.GetPointer(), src.GetSize(), access_key, key_source, static_cast(smc::DecryptOrImportMode::ImportEsKey)); + Result EsService::LoadEsDeviceKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { + return impl::LoadEsDeviceKey(src.GetPointer(), src.GetSize(), access_key, key_source, static_cast(smc::DeviceUniqueDataMode::DecryptAndStoreEsDeviceKey)); } - Result EsService::UnwrapTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation) { - return impl::UnwrapTitleKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), generation); + Result EsService::PrepareEsTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation) { + return impl::PrepareEsTitleKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), generation); } - Result EsService::UnwrapCommonTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation) { - return impl::UnwrapCommonTitleKey(out_access_key.GetPointer(), key_source, generation); + Result EsService::PrepareCommonEsTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation) { + return impl::PrepareCommonEsTitleKey(out_access_key.GetPointer(), key_source, generation); } - Result EsService::ImportDrmKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { - return impl::ImportDrmKey(src.GetPointer(), src.GetSize(), access_key, key_source); + Result EsService::DecryptAndStoreDrmDeviceCertKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { + return impl::DecryptAndStoreDrmDeviceCertKey(src.GetPointer(), src.GetSize(), access_key, key_source); } - Result 
EsService::DrmExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod) { - return impl::DrmExpMod(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize()); + Result EsService::ModularExponentiateWithDrmDeviceCertKey(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod) { + return impl::ModularExponentiateWithDrmDeviceCertKey(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize()); } - Result EsService::UnwrapElicenseKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation) { - return impl::UnwrapElicenseKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), generation); + Result EsService::PrepareEsArchiveKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation) { + return impl::PrepareEsArchiveKey(out_access_key.GetPointer(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize(), generation); } - Result EsService::LoadElicenseKey(s32 keyslot, AccessKey access_key) { - return impl::LoadElicenseKey(keyslot, this, access_key); + Result EsService::LoadPreparedAesKey(s32 keyslot, AccessKey access_key) { + return impl::LoadPreparedAesKey(keyslot, this, access_key); } } diff --git a/stratosphere/spl/source/spl_es_service.hpp b/stratosphere/spl/source/spl_es_service.hpp index abbe0b59b..2c5a375bf 100644 --- a/stratosphere/spl/source/spl_es_service.hpp +++ b/stratosphere/spl/source/spl_es_service.hpp @@ -14,53 +14,22 @@ * along with this program. If not, see . */ #pragma once -#include "spl_rsa_service.hpp" +#include "spl_device_unique_data_service.hpp" namespace ams::spl { - class EsService : public RsaService { + class EsService : public DeviceUniqueDataService { public: - EsService() : RsaService() { /* ... */ } - virtual ~EsService() { /* ... */} - protected: /* Actual commands. 
*/ - virtual Result ImportEsKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); - virtual Result ImportEsKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); - virtual Result UnwrapTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation); - virtual Result UnwrapCommonTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation); - virtual Result ImportDrmKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); - virtual Result DrmExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod); - virtual Result UnwrapElicenseKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation); - virtual Result LoadElicenseKey(s32 keyslot, AccessKey access_key); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetConfig), - MAKE_SERVICE_COMMAND_META(ExpMod), - MAKE_SERVICE_COMMAND_META(SetConfig), - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - MAKE_SERVICE_COMMAND_META(IsDevelopment), - MAKE_SERVICE_COMMAND_META(SetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GenerateAesKek), - MAKE_SERVICE_COMMAND_META(LoadAesKey), - MAKE_SERVICE_COMMAND_META(GenerateAesKey), - MAKE_SERVICE_COMMAND_META(DecryptAesKey), - MAKE_SERVICE_COMMAND_META(CryptAesCtr), - MAKE_SERVICE_COMMAND_META(ComputeCmac), - MAKE_SERVICE_COMMAND_META(AllocateAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(FreeAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetAesKeyslotAvailableEvent, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(DecryptRsaPrivateKeyDeprecated, hos::Version_4_0_0, hos::Version_4_1_0), - MAKE_SERVICE_COMMAND_META(DecryptRsaPrivateKey, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(ImportEsKeyDeprecated, hos::Version_4_0_0, hos::Version_4_1_0), - MAKE_SERVICE_COMMAND_META(ImportEsKey, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(UnwrapTitleKey), - MAKE_SERVICE_COMMAND_META(UnwrapCommonTitleKey, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(ImportDrmKey, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(DrmExpMod, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(UnwrapElicenseKey, hos::Version_6_0_0), - MAKE_SERVICE_COMMAND_META(LoadElicenseKey, hos::Version_6_0_0), - }; + Result LoadEsDeviceKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); + Result LoadEsDeviceKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); + Result PrepareEsTitleKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation); + Result PrepareCommonEsTitleKey(sf::Out out_access_key, KeySource key_source, u32 generation); + Result DecryptAndStoreDrmDeviceCertKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); + Result ModularExponentiateWithDrmDeviceCertKey(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod); + Result PrepareEsArchiveKey(sf::Out out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation); + Result LoadPreparedAesKey(s32 keyslot, AccessKey 
access_key); }; + static_assert(spl::impl::IsIEsInterface); } diff --git a/stratosphere/spl/source/spl_fs_service.cpp b/stratosphere/spl/source/spl_fs_service.cpp index 290e4b177..411909c30 100644 --- a/stratosphere/spl/source/spl_fs_service.cpp +++ b/stratosphere/spl/source/spl_fs_service.cpp @@ -19,24 +19,24 @@ namespace ams::spl { - Result FsService::ImportLotusKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { - return impl::ImportLotusKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); + Result FsService::DecryptAndStoreGcKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { + return impl::DecryptAndStoreGcKey(src.GetPointer(), src.GetSize(), access_key, key_source, option); } - Result FsService::ImportLotusKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { - return impl::ImportLotusKey(src.GetPointer(), src.GetSize(), access_key, key_source, static_cast(smc::DecryptOrImportMode::ImportLotusKey)); + Result FsService::DecryptAndStoreGcKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { + return impl::DecryptAndStoreGcKey(src.GetPointer(), src.GetSize(), access_key, key_source, static_cast(smc::DeviceUniqueDataMode::DecryptAndStoreGcKey)); } - Result FsService::DecryptLotusMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest) { - return impl::DecryptLotusMessage(out_size.GetPointer(), out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize()); + Result FsService::DecryptGcMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest) { + return impl::DecryptGcMessage(out_size.GetPointer(), out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize(), label_digest.GetPointer(), label_digest.GetSize()); } Result FsService::GenerateSpecificAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 which) { return impl::GenerateSpecificAesKey(out_key.GetPointer(), key_source, generation, which); } - Result FsService::LoadTitleKey(s32 keyslot, AccessKey access_key) { - return impl::LoadTitleKey(keyslot, this, access_key); + Result FsService::LoadPreparedAesKey(s32 keyslot, AccessKey access_key) { + return impl::LoadPreparedAesKey(keyslot, this, access_key); } Result FsService::GetPackage2Hash(const sf::OutPointerBuffer &dst) { diff --git a/stratosphere/spl/source/spl_fs_service.hpp b/stratosphere/spl/source/spl_fs_service.hpp index eace4735f..a236047a8 100644 --- a/stratosphere/spl/source/spl_fs_service.hpp +++ b/stratosphere/spl/source/spl_fs_service.hpp @@ -20,41 +20,14 @@ namespace ams::spl { class FsService : public CryptoService { public: - FsService() : CryptoService() { /* ... */ } - virtual ~FsService() { /* ... */ } - protected: /* Actual commands. 
*/ - virtual Result ImportLotusKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); - virtual Result ImportLotusKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); - virtual Result DecryptLotusMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest); - virtual Result GenerateSpecificAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 which); - virtual Result LoadTitleKey(s32 keyslot, AccessKey access_key); - virtual Result GetPackage2Hash(const sf::OutPointerBuffer &dst); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetConfig), - MAKE_SERVICE_COMMAND_META(ExpMod), - MAKE_SERVICE_COMMAND_META(SetConfig), - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - MAKE_SERVICE_COMMAND_META(IsDevelopment), - MAKE_SERVICE_COMMAND_META(SetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GenerateAesKek), - MAKE_SERVICE_COMMAND_META(LoadAesKey), - MAKE_SERVICE_COMMAND_META(GenerateAesKey), - MAKE_SERVICE_COMMAND_META(DecryptAesKey), - MAKE_SERVICE_COMMAND_META(CryptAesCtr), - MAKE_SERVICE_COMMAND_META(ComputeCmac), - MAKE_SERVICE_COMMAND_META(AllocateAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(FreeAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetAesKeyslotAvailableEvent, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(ImportLotusKeyDeprecated, hos::Version_4_0_0, hos::Version_4_1_0), - MAKE_SERVICE_COMMAND_META(ImportLotusKey, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(DecryptLotusMessage), - MAKE_SERVICE_COMMAND_META(GenerateSpecificAesKey), - MAKE_SERVICE_COMMAND_META(LoadTitleKey), - MAKE_SERVICE_COMMAND_META(GetPackage2Hash, hos::Version_5_0_0), - }; + Result DecryptAndStoreGcKeyDeprecated(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option); + Result DecryptAndStoreGcKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); + Result DecryptGcMessage(sf::Out out_size, const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest); + Result GenerateSpecificAesKey(sf::Out out_key, KeySource key_source, u32 generation, u32 which); + Result LoadPreparedAesKey(s32 keyslot, AccessKey access_key); + Result GetPackage2Hash(const sf::OutPointerBuffer &dst); }; + static_assert(spl::impl::IsIFsInterface); } diff --git a/stratosphere/spl/source/spl_general_service.cpp b/stratosphere/spl/source/spl_general_service.cpp index cb87c37fb..83bbe9419 100644 --- a/stratosphere/spl/source/spl_general_service.cpp +++ b/stratosphere/spl/source/spl_general_service.cpp @@ -20,15 +20,15 @@ namespace ams::spl { Result GeneralService::GetConfig(sf::Out out, u32 which) { - return impl::GetConfig(out.GetPointer(), static_cast(which)); + return impl::GetConfig(out.GetPointer(), static_cast(which)); } - Result GeneralService::ExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod) { - return impl::ExpMod(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), exp.GetPointer(), exp.GetSize(), mod.GetPointer(), mod.GetSize()); + Result GeneralService::ModularExponentiate(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const 
sf::InPointerBuffer &mod) { + return impl::ModularExponentiate(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), exp.GetPointer(), exp.GetSize(), mod.GetPointer(), mod.GetSize()); } Result GeneralService::SetConfig(u32 which, u64 value) { - return impl::SetConfig(static_cast(which), value); + return impl::SetConfig(static_cast(which), value); } Result GeneralService::GenerateRandomBytes(const sf::OutPointerBuffer &out) { diff --git a/stratosphere/spl/source/spl_general_service.hpp b/stratosphere/spl/source/spl_general_service.hpp index b8c212a48..7e397f4f8 100644 --- a/stratosphere/spl/source/spl_general_service.hpp +++ b/stratosphere/spl/source/spl_general_service.hpp @@ -18,76 +18,17 @@ namespace ams::spl { - class GeneralService : public sf::IServiceObject { - protected: - enum class CommandId { - /* 1.0.0+ */ - GetConfig = 0, - ExpMod = 1, - GenerateAesKek = 2, - LoadAesKey = 3, - GenerateAesKey = 4, - SetConfig = 5, - GenerateRandomBytes = 7, - ImportLotusKeyDeprecated = 9, - ImportLotusKey = 9, - DecryptLotusMessage = 10, - IsDevelopment = 11, - GenerateSpecificAesKey = 12, - DecryptRsaPrivateKeyDeprecated = 13, - DecryptRsaPrivateKey = 13, - DecryptAesKey = 14, - CryptAesCtr = 15, - ComputeCmac = 16, - ImportEsKeyDeprecated = 17, - ImportEsKey = 17, - UnwrapTitleKey = 18, - LoadTitleKey = 19, - - /* 2.0.0+ */ - UnwrapCommonTitleKey = 20, - AllocateAesKeyslot = 21, - FreeAesKeyslot = 22, - GetAesKeyslotAvailableEvent = 23, - - /* 3.0.0+ */ - SetBootReason = 24, - GetBootReason = 25, - - /* 5.0.0+ */ - ImportSslKey = 26, - SslExpMod = 27, - ImportDrmKey = 28, - DrmExpMod = 29, - ReEncryptRsaPrivateKey = 30, - GetPackage2Hash = 31, - - /* 6.0.0+ */ - UnwrapElicenseKey = 31, /* re-used command id :( */ - LoadElicenseKey = 32, - }; + class GeneralService { public: - GeneralService() { /* ... */ } - virtual ~GeneralService() { /* ... */ } - protected: /* Actual commands. 
*/ - virtual Result GetConfig(sf::Out out, u32 which); - virtual Result ExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod); - virtual Result SetConfig(u32 which, u64 value); - virtual Result GenerateRandomBytes(const sf::OutPointerBuffer &out); - virtual Result IsDevelopment(sf::Out is_dev); - virtual Result SetBootReason(BootReasonValue boot_reason); - virtual Result GetBootReason(sf::Out out); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetConfig), - MAKE_SERVICE_COMMAND_META(ExpMod), - MAKE_SERVICE_COMMAND_META(SetConfig), - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - MAKE_SERVICE_COMMAND_META(IsDevelopment), - MAKE_SERVICE_COMMAND_META(SetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetBootReason, hos::Version_3_0_0), - }; + Result GetConfig(sf::Out out, u32 which); + Result ModularExponentiate(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &exp, const sf::InPointerBuffer &mod); + Result SetConfig(u32 which, u64 value); + Result GenerateRandomBytes(const sf::OutPointerBuffer &out); + Result IsDevelopment(sf::Out is_dev); + Result SetBootReason(BootReasonValue boot_reason); + Result GetBootReason(sf::Out out); }; + static_assert(spl::impl::IsIGeneralInterface); } diff --git a/stratosphere/spl/source/spl_main.cpp b/stratosphere/spl/source/spl_main.cpp index 6ffc5a290..5a615d303 100644 --- a/stratosphere/spl/source/spl_main.cpp +++ b/stratosphere/spl/source/spl_main.cpp @@ -138,18 +138,18 @@ int main(int argc, char **argv) spl::impl::Initialize(); /* Create services. */ - R_ABORT_UNLESS(g_server_manager.RegisterServer(RandomServiceName, RandomMaxSessions)); + R_ABORT_UNLESS((g_server_manager.RegisterServer(RandomServiceName, RandomMaxSessions))); if (hos::GetVersion() >= hos::Version_4_0_0) { - R_ABORT_UNLESS(g_server_manager.RegisterServer(GeneralServiceName, GeneralMaxSessions)); - R_ABORT_UNLESS(g_server_manager.RegisterServer(CryptoServiceName, CryptoMaxSessions)); - R_ABORT_UNLESS(g_server_manager.RegisterServer(SslServiceName, SslMaxSessions)); - R_ABORT_UNLESS(g_server_manager.RegisterServer(EsServiceName, EsMaxSessions)); - R_ABORT_UNLESS(g_server_manager.RegisterServer(FsServiceName, FsMaxSessions)); + R_ABORT_UNLESS((g_server_manager.RegisterServer(GeneralServiceName, GeneralMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(CryptoServiceName, CryptoMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(SslServiceName, SslMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(EsServiceName, EsMaxSessions))); + R_ABORT_UNLESS((g_server_manager.RegisterServer(FsServiceName, FsMaxSessions))); if (hos::GetVersion() >= hos::Version_5_0_0) { - R_ABORT_UNLESS(g_server_manager.RegisterServer(ManuServiceName, ManuMaxSessions)); + R_ABORT_UNLESS((g_server_manager.RegisterServer(ManuServiceName, ManuMaxSessions))); } } else { - R_ABORT_UNLESS(g_server_manager.RegisterServer(DeprecatedServiceName, DeprecatedMaxSessions)); + R_ABORT_UNLESS((g_server_manager.RegisterServer(DeprecatedServiceName, DeprecatedMaxSessions))); } /* Loop forever, servicing our services. 
*/ diff --git a/stratosphere/spl/source/spl_manu_service.cpp b/stratosphere/spl/source/spl_manu_service.cpp index 17cc8bcce..4532a0d54 100644 --- a/stratosphere/spl/source/spl_manu_service.cpp +++ b/stratosphere/spl/source/spl_manu_service.cpp @@ -19,8 +19,8 @@ namespace ams::spl { - Result ManuService::ReEncryptRsaPrivateKey(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &src, AccessKey access_key_dec, KeySource source_dec, AccessKey access_key_enc, KeySource source_enc, u32 option) { - return impl::ReEncryptRsaPrivateKey(out.GetPointer(), out.GetSize(), src.GetPointer(), src.GetSize(), access_key_dec, source_dec, access_key_enc, source_enc, option); + Result ManuService::ReencryptDeviceUniqueData(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &src, AccessKey access_key_dec, KeySource source_dec, AccessKey access_key_enc, KeySource source_enc, u32 option) { + return impl::ReencryptDeviceUniqueData(out.GetPointer(), out.GetSize(), src.GetPointer(), src.GetSize(), access_key_dec, source_dec, access_key_enc, source_enc, option); } } diff --git a/stratosphere/spl/source/spl_manu_service.hpp b/stratosphere/spl/source/spl_manu_service.hpp index 9e6342369..30f4b63c3 100644 --- a/stratosphere/spl/source/spl_manu_service.hpp +++ b/stratosphere/spl/source/spl_manu_service.hpp @@ -14,40 +14,15 @@ * along with this program. If not, see . */ #pragma once -#include "spl_rsa_service.hpp" +#include "spl_device_unique_data_service.hpp" namespace ams::spl { - class ManuService : public RsaService { + class ManuService : public DeviceUniqueDataService { public: - ManuService() : RsaService() { /* ... */ } - - virtual ~ManuService() { /* ... */ } - protected: /* Actual commands. */ - virtual Result ReEncryptRsaPrivateKey(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &src, AccessKey access_key_dec, KeySource source_dec, AccessKey access_key_enc, KeySource source_enc, u32 option); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetConfig), - MAKE_SERVICE_COMMAND_META(ExpMod), - MAKE_SERVICE_COMMAND_META(SetConfig), - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - MAKE_SERVICE_COMMAND_META(IsDevelopment), - MAKE_SERVICE_COMMAND_META(SetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GenerateAesKek), - MAKE_SERVICE_COMMAND_META(LoadAesKey), - MAKE_SERVICE_COMMAND_META(GenerateAesKey), - MAKE_SERVICE_COMMAND_META(DecryptAesKey), - MAKE_SERVICE_COMMAND_META(CryptAesCtr), - MAKE_SERVICE_COMMAND_META(ComputeCmac), - MAKE_SERVICE_COMMAND_META(AllocateAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(FreeAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetAesKeyslotAvailableEvent, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(DecryptRsaPrivateKeyDeprecated, hos::Version_4_0_0, hos::Version_4_1_0), - MAKE_SERVICE_COMMAND_META(DecryptRsaPrivateKey, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(ReEncryptRsaPrivateKey, hos::Version_5_0_0), - }; + Result ReencryptDeviceUniqueData(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &src, AccessKey access_key_dec, KeySource source_dec, AccessKey access_key_enc, KeySource source_enc, u32 option); }; + static_assert(spl::impl::IsIManuInterface); } diff --git a/stratosphere/spl/source/spl_random_service.hpp b/stratosphere/spl/source/spl_random_service.hpp index e6d71c287..fdbba30e8 100644 --- a/stratosphere/spl/source/spl_random_service.hpp +++ b/stratosphere/spl/source/spl_random_service.hpp @@ -18,21 
+18,11 @@ namespace ams::spl { - class RandomService final : public sf::IServiceObject { - protected: - enum class CommandId { - GenerateRandomBytes = 0, - }; + class RandomService final { public: - RandomService() { /* ... */ } - virtual ~RandomService() { /* ... */ } - private: /* Actual commands. */ - virtual Result GenerateRandomBytes(const sf::OutBuffer &out); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - }; + Result GenerateRandomBytes(const sf::OutBuffer &out); }; + static_assert(spl::impl::IsIRandomInterface); } diff --git a/stratosphere/spl/source/spl_rsa_service.cpp b/stratosphere/spl/source/spl_rsa_service.cpp deleted file mode 100644 index c14d01834..000000000 --- a/stratosphere/spl/source/spl_rsa_service.cpp +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2018-2020 Atmosphère-NX - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#include -#include "spl_api_impl.hpp" -#include "spl_rsa_service.hpp" - -namespace ams::spl { - - Result RsaService::DecryptRsaPrivateKeyDeprecated(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source, u32 option) { - return impl::DecryptRsaPrivateKey(dst.GetPointer(), dst.GetSize(), src.GetPointer(), src.GetSize(), access_key, key_source, option); - } - - Result RsaService::DecryptRsaPrivateKey(const sf::OutPointerBuffer &dst, const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { - return impl::DecryptRsaPrivateKey(dst.GetPointer(), dst.GetSize(), src.GetPointer(), src.GetSize(), access_key, key_source, static_cast(smc::DecryptOrImportMode::DecryptRsaPrivateKey)); - } - -} diff --git a/stratosphere/spl/source/spl_ssl_service.cpp b/stratosphere/spl/source/spl_ssl_service.cpp index b6c8eeb44..312291119 100644 --- a/stratosphere/spl/source/spl_ssl_service.cpp +++ b/stratosphere/spl/source/spl_ssl_service.cpp @@ -19,12 +19,12 @@ namespace ams::spl { - Result SslService::ImportSslKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { - return impl::ImportSslKey(src.GetPointer(), src.GetSize(), access_key, key_source); + Result SslService::DecryptAndStoreSslClientCertKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source) { + return impl::DecryptAndStoreSslClientCertKey(src.GetPointer(), src.GetSize(), access_key, key_source); } - Result SslService::SslExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod) { - return impl::SslExpMod(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize()); + Result SslService::ModularExponentiateWithSslClientCertKey(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod) { + return impl::ModularExponentiateWithSslClientCertKey(out.GetPointer(), out.GetSize(), base.GetPointer(), base.GetSize(), mod.GetPointer(), mod.GetSize()); } } diff --git a/stratosphere/spl/source/spl_ssl_service.hpp 
b/stratosphere/spl/source/spl_ssl_service.hpp index ba213b309..c06f38dfb 100644 --- a/stratosphere/spl/source/spl_ssl_service.hpp +++ b/stratosphere/spl/source/spl_ssl_service.hpp @@ -14,42 +14,16 @@ * along with this program. If not, see . */ #pragma once -#include "spl_rsa_service.hpp" +#include "spl_device_unique_data_service.hpp" namespace ams::spl { - class SslService : public RsaService { + class SslService : public DeviceUniqueDataService { public: - SslService() : RsaService() { /* ... */ } - virtual ~SslService() { /* ... */ } - protected: /* Actual commands. */ - virtual Result ImportSslKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); - virtual Result SslExpMod(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod); - public: - DEFINE_SERVICE_DISPATCH_TABLE { - MAKE_SERVICE_COMMAND_META(GetConfig), - MAKE_SERVICE_COMMAND_META(ExpMod), - MAKE_SERVICE_COMMAND_META(SetConfig), - MAKE_SERVICE_COMMAND_META(GenerateRandomBytes), - MAKE_SERVICE_COMMAND_META(IsDevelopment), - MAKE_SERVICE_COMMAND_META(SetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GetBootReason, hos::Version_3_0_0), - MAKE_SERVICE_COMMAND_META(GenerateAesKek), - MAKE_SERVICE_COMMAND_META(LoadAesKey), - MAKE_SERVICE_COMMAND_META(GenerateAesKey), - MAKE_SERVICE_COMMAND_META(DecryptAesKey), - MAKE_SERVICE_COMMAND_META(CryptAesCtr), - MAKE_SERVICE_COMMAND_META(ComputeCmac), - MAKE_SERVICE_COMMAND_META(AllocateAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(FreeAesKeyslot, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(GetAesKeyslotAvailableEvent, hos::Version_2_0_0), - MAKE_SERVICE_COMMAND_META(DecryptRsaPrivateKeyDeprecated, hos::Version_4_0_0, hos::Version_4_1_0), - MAKE_SERVICE_COMMAND_META(DecryptRsaPrivateKey, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(ImportSslKey, hos::Version_5_0_0), - MAKE_SERVICE_COMMAND_META(SslExpMod, hos::Version_5_0_0), - - }; + Result DecryptAndStoreSslClientCertKey(const sf::InPointerBuffer &src, AccessKey access_key, KeySource key_source); + Result ModularExponentiateWithSslClientCertKey(const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod); }; + static_assert(spl::impl::IsISslInterface); } diff --git a/troposphere/Makefile b/troposphere/Makefile index 2be61ea0a..25adf0da2 100644 --- a/troposphere/Makefile +++ b/troposphere/Makefile @@ -1,4 +1,4 @@ -APPLICATIONS := reboot_to_payload +APPLICATIONS := daybreak reboot_to_payload SUBFOLDERS := $(APPLICATIONS) diff --git a/troposphere/daybreak/Makefile b/troposphere/daybreak/Makefile new file mode 100644 index 000000000..8753aca8f --- /dev/null +++ b/troposphere/daybreak/Makefile @@ -0,0 +1,282 @@ +#--------------------------------------------------------------------------------- +.SUFFIXES: +#--------------------------------------------------------------------------------- + +ifeq ($(strip $(DEVKITPRO)),) +$(error "Please set DEVKITPRO in your environment. 
export DEVKITPRO=/devkitpro") +endif + +TOPDIR ?= $(CURDIR) +include $(DEVKITPRO)/libnx/switch_rules + +#--------------------------------------------------------------------------------- +# TARGET is the name of the output +# BUILD is the directory where object files & intermediate files will be placed +# SOURCES is a list of directories containing source code +# DATA is a list of directories containing data files +# INCLUDES is a list of directories containing header files +# ROMFS is the directory containing data to be added to RomFS, relative to the Makefile (Optional) +# +# NO_ICON: if set to anything, do not use icon. +# NO_NACP: if set to anything, no .nacp file is generated. +# APP_TITLE is the name of the app stored in the .nacp file (Optional) +# APP_AUTHOR is the author of the app stored in the .nacp file (Optional) +# APP_VERSION is the version of the app stored in the .nacp file (Optional) +# APP_TITLEID is the titleID of the app stored in the .nacp file (Optional) +# ICON is the filename of the icon (.jpg), relative to the project folder. +# If not set, it attempts to use one of the following (in this order): +# - .jpg +# - icon.jpg +# - /default_icon.jpg +# +# CONFIG_JSON is the filename of the NPDM config file (.json), relative to the project folder. +# If not set, it attempts to use one of the following (in this order): +# - .json +# - config.json +# If a JSON file is provided or autodetected, an ExeFS PFS0 (.nsp) is built instead +# of a homebrew executable (.nro). This is intended to be used for sysmodules. +# NACP building is skipped as well. +#--------------------------------------------------------------------------------- +TARGET := daybreak +BUILD := build +SOURCES := source nanovg/shaders +DATA := data +INCLUDES := include ../include +ROMFS := romfs + +# Output folders for autogenerated files in romfs +OUT_SHADERS := shaders + +APP_TITLE := Daybreak +APP_AUTHOR := Atmosphere-NX +APP_VERSION := 1.0.0 + +#--------------------------------------------------------------------------------- +# options for code generation +#--------------------------------------------------------------------------------- +ARCH := -march=armv8-a+crc+crypto -mtune=cortex-a57 -mtp=soft -fPIE + +CFLAGS := -g -Wall -O2 -ffunction-sections \ + $(ARCH) $(DEFINES) + +CFLAGS += $(INCLUDE) -D__SWITCH__ + +CXXFLAGS := $(CFLAGS) -std=gnu++17 -fno-exceptions -fno-rtti + +ASFLAGS := -g $(ARCH) +LDFLAGS = -specs=$(DEVKITPRO)/libnx/switch.specs -g $(ARCH) -Wl,-Map,$(notdir $*.map) + +LIBS := -lnanovg -ldeko3d -lnx + +#--------------------------------------------------------------------------------- +# list of directories containing libraries, this must be the top level containing +# include and lib +#--------------------------------------------------------------------------------- +LIBDIRS := $(PORTLIBS) $(LIBNX) $(CURDIR)/nanovg/ + +#--------------------------------------------------------------------------------- +# no real need to edit anything past this point unless you need to add additional +# rules for different file extensions +#--------------------------------------------------------------------------------- +ifneq ($(BUILD),$(notdir $(CURDIR))) +#--------------------------------------------------------------------------------- + +export OUTPUT := $(CURDIR)/$(TARGET) +export TOPDIR := $(CURDIR) + +export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \ + $(foreach dir,$(DATA),$(CURDIR)/$(dir)) + +export DEPSDIR := $(CURDIR)/$(BUILD) + +SUBFOLDERS := nanovg + +TOPTARGETS := all clean + 
+$(TOPTARGETS): $(SUBFOLDERS) + +$(SUBFOLDERS): + $(MAKE) -C $@ $(MAKECMDGOALS) + +CFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.c))) +CPPFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.cpp))) +SFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.s))) +GLSLFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.glsl))) +BINFILES := $(foreach dir,$(DATA),$(notdir $(wildcard $(dir)/*.*))) + +#--------------------------------------------------------------------------------- +# use CXX for linking C++ projects, CC for standard C +#--------------------------------------------------------------------------------- +ifeq ($(strip $(CPPFILES)),) +#--------------------------------------------------------------------------------- + export LD := $(CC) +#--------------------------------------------------------------------------------- +else +#--------------------------------------------------------------------------------- + export LD := $(CXX) +#--------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------- + +export OFILES_BIN := $(addsuffix .o,$(BINFILES)) +export OFILES_SRC := $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o) +export OFILES := $(OFILES_BIN) $(OFILES_SRC) +export HFILES_BIN := $(addsuffix .h,$(subst .,_,$(BINFILES))) +export LIBPATHS := $(foreach dir,$(LIBDIRS),-L$(dir)/lib) + +ifneq ($(strip $(ROMFS)),) + ROMFS_TARGETS := + ROMFS_FOLDERS := + ifneq ($(strip $(OUT_SHADERS)),) + ROMFS_SHADERS := $(ROMFS)/$(OUT_SHADERS) + ROMFS_TARGETS += $(patsubst %.glsl, $(ROMFS_SHADERS)/%.dksh, $(GLSLFILES)) + ROMFS_FOLDERS += $(ROMFS_SHADERS) + endif + + export ROMFS_DEPS := $(foreach file,$(ROMFS_TARGETS),$(CURDIR)/$(file)) +endif + +export INCLUDE := $(foreach dir,$(INCLUDES),-I$(CURDIR)/$(dir)) \ + $(foreach dir,$(LIBDIRS),-I$(dir)/include) \ + -I$(CURDIR)/$(BUILD) + +ifeq ($(strip $(CONFIG_JSON)),) + jsons := $(wildcard *.json) + ifneq (,$(findstring $(TARGET).json,$(jsons))) + export APP_JSON := $(TOPDIR)/$(TARGET).json + else + ifneq (,$(findstring config.json,$(jsons))) + export APP_JSON := $(TOPDIR)/config.json + endif + endif +else + export APP_JSON := $(TOPDIR)/$(CONFIG_JSON) +endif + +ifeq ($(strip $(ICON)),) + icons := $(wildcard *.jpg) + ifneq (,$(findstring $(TARGET).jpg,$(icons))) + export APP_ICON := $(TOPDIR)/$(TARGET).jpg + else + ifneq (,$(findstring icon.jpg,$(icons))) + export APP_ICON := $(TOPDIR)/icon.jpg + endif + endif +else + export APP_ICON := $(TOPDIR)/$(ICON) +endif + +ifeq ($(strip $(NO_ICON)),) + export NROFLAGS += --icon=$(APP_ICON) +endif + +ifeq ($(strip $(NO_NACP)),) + export NROFLAGS += --nacp=$(CURDIR)/$(TARGET).nacp +endif + +ifneq ($(APP_TITLEID),) + export NACPFLAGS += --titleid=$(APP_TITLEID) +endif + +ifneq ($(ROMFS),) + export NROFLAGS += --romfsdir=$(CURDIR)/$(ROMFS) +endif + +.PHONY: $(TOPTARGETS) $(SUBFOLDERS) all clean + +#--------------------------------------------------------------------------------- +all: $(ROMFS_TARGETS) | $(BUILD) + @$(MAKE) --no-print-directory -C $(BUILD) -f $(CURDIR)/Makefile + +$(BUILD): + @mkdir -p $@ + +ifneq ($(strip $(ROMFS_TARGETS)),) + +$(ROMFS_TARGETS): | $(ROMFS_FOLDERS) + +$(ROMFS_FOLDERS): + @mkdir -p $@ + +$(ROMFS_SHADERS)/%_vsh.dksh: %_vsh.glsl + @echo {vert} $(notdir $<) + @uam -s vert -o $@ $< + +$(ROMFS_SHADERS)/%_tcsh.dksh: %_tcsh.glsl + @echo {tess_ctrl} $(notdir $<) + @uam -s tess_ctrl -o $@ $< + +$(ROMFS_SHADERS)/%_tesh.dksh: %_tesh.glsl + @echo 
{tess_eval} $(notdir $<) + @uam -s tess_eval -o $@ $< + +$(ROMFS_SHADERS)/%_gsh.dksh: %_gsh.glsl + @echo {geom} $(notdir $<) + @uam -s geom -o $@ $< + +$(ROMFS_SHADERS)/%_fsh.dksh: %_fsh.glsl + @echo {frag} $(notdir $<) + @uam -s frag -o $@ $< + +$(ROMFS_SHADERS)/%.dksh: %.glsl + @echo {comp} $(notdir $<) + @uam -s comp -o $@ $< + +endif + +#--------------------------------------------------------------------------------- +clean: + @echo clean ... +ifeq ($(strip $(APP_JSON)),) + @rm -fr $(BUILD) $(ROMFS_FOLDERS) $(TARGET).nro $(TARGET).nacp $(TARGET).elf +else + @rm -fr $(BUILD) $(ROMFS_FOLDERS) $(TARGET).nsp $(TARGET).nso $(TARGET).npdm $(TARGET).elf +endif + + +#--------------------------------------------------------------------------------- +else +.PHONY: all + +DEPENDS := $(OFILES:.o=.d) + +#--------------------------------------------------------------------------------- +# main targets +#--------------------------------------------------------------------------------- +ifeq ($(strip $(APP_JSON)),) + +all : $(OUTPUT).nro + +ifeq ($(strip $(NO_NACP)),) +$(OUTPUT).nro : $(OUTPUT).elf $(OUTPUT).nacp $(ROMFS_DEPS) +else +$(OUTPUT).nro : $(OUTPUT).elf $(ROMFS_DEPS) +endif + +else + +all : $(OUTPUT).nsp + +$(OUTPUT).nsp : $(OUTPUT).nso $(OUTPUT).npdm + +$(OUTPUT).nso : $(OUTPUT).elf + +endif + +$(OUTPUT).elf : $(OFILES) + +$(OFILES_SRC) : $(HFILES_BIN) + +#--------------------------------------------------------------------------------- +# you need a rule like this for each extension you use as binary data +#--------------------------------------------------------------------------------- +%.bin.o %_bin.h : %.bin +#--------------------------------------------------------------------------------- + @echo $(notdir $<) + @$(bin2o) + +-include $(DEPENDS) + +#--------------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------------- \ No newline at end of file diff --git a/troposphere/daybreak/icon.jpg b/troposphere/daybreak/icon.jpg new file mode 100644 index 000000000..867e5d53b Binary files /dev/null and b/troposphere/daybreak/icon.jpg differ diff --git a/troposphere/daybreak/nanovg/.gitignore b/troposphere/daybreak/nanovg/.gitignore new file mode 100644 index 000000000..50b6c6179 --- /dev/null +++ b/troposphere/daybreak/nanovg/.gitignore @@ -0,0 +1,93 @@ +# deko3d shaders +*.dksh + +# Prerequisites +*.d + +# Object files +*.o +*.ko +*.obj +*.elf + +# Linker output +*.ilk +*.map +*.exp +*.lst + +# Precompiled Headers +*.gch +*.pch + +# Libraries +*.lib +*.a +*.la +*.lo + +# Shared objects (inc. Windows DLLs) +*.dll +*.so +*.so.* +*.dylib + +# Executables +*.exe +*.lz4 +*.out +*.app +*.i*86 +*.x86_64 +*.hex + +# Switch Executables +*.nso +*.nro +*.nacp +*.npdm +*.pfs0 +*.nsp +*.kip + +# Debug files +*.dSYM/ +*.su +*.idb +*.pdb + +# Kernel Module Compile Results +*.mod* +*.cmd +.tmp_versions/ +modules.order +Module.symvers +Mkfile.old +dkms.conf + +# Distribution files +*.tgz +*.zip +*.bz2 + +# IDA binaries +*.id0 +*.id1 +*.id2 +*.idb +*.i64 +*.nam +*.til + +# KEYS file for sept-secondary. +*.pyc +sept/sept-secondary/KEYS.py + +.**/ + +# NOTE: make sure to make exceptions to this pattern when needed! 
+*.bin +*.enc + +**/out +**/build diff --git a/troposphere/daybreak/nanovg/.gitrepo b/troposphere/daybreak/nanovg/.gitrepo new file mode 100644 index 000000000..af3cd2185 --- /dev/null +++ b/troposphere/daybreak/nanovg/.gitrepo @@ -0,0 +1,12 @@ +; DO NOT EDIT (unless you know what you are doing) +; +; This subdirectory is a git "subrepo", and this file is maintained by the +; git-subrepo command. See https://github.com/git-commands/git-subrepo#readme +; +[subrepo] + remote = https://github.com/Adubbz/nanovg-deko.git + branch = master + commit = c197ba2f0d1fe2c70e2d49c61f16b4063aef569e + parent = 171d97e0ac2eaf1bca1c48727404fec45621f1fd + method = merge + cmdver = 0.4.1 diff --git a/troposphere/daybreak/nanovg/LICENSE b/troposphere/daybreak/nanovg/LICENSE new file mode 100644 index 000000000..53562c099 --- /dev/null +++ b/troposphere/daybreak/nanovg/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2020 Adubbz, Mikko Mononen memon@inside.org + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any +damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. +3. This notice may not be removed or altered from any source + distribution. diff --git a/troposphere/daybreak/nanovg/Makefile b/troposphere/daybreak/nanovg/Makefile new file mode 100644 index 000000000..48826c26f --- /dev/null +++ b/troposphere/daybreak/nanovg/Makefile @@ -0,0 +1,168 @@ +#--------------------------------------------------------------------------------- +.SUFFIXES: +#--------------------------------------------------------------------------------- + +ifeq ($(strip $(DEVKITPRO)),) +$(error "Please set DEVKITPRO in your environment. export DEVKITPRO=/devkitpro") +endif + +TOPDIR ?= $(CURDIR) +include $(DEVKITPRO)/libnx/switch_rules + +#--------------------------------------------------------------------------------- +# TARGET is the name of the output +# BUILD is the directory where object files & intermediate files will be placed +# SOURCES is a list of directories containing source code +# DATA is a list of directories containing data files +# INCLUDES is a list of directories containing header files +# ROMFS is the directory containing data to be added to RomFS, relative to the Makefile (Optional) +# +# NO_ICON: if set to anything, do not use icon. +# NO_NACP: if set to anything, no .nacp file is generated. +# APP_TITLE is the name of the app stored in the .nacp file (Optional) +# APP_AUTHOR is the author of the app stored in the .nacp file (Optional) +# APP_VERSION is the version of the app stored in the .nacp file (Optional) +# APP_TITLEID is the titleID of the app stored in the .nacp file (Optional) +# ICON is the filename of the icon (.jpg), relative to the project folder. +# If not set, it attempts to use one of the following (in this order): +# - .jpg +# - icon.jpg +# - /default_icon.jpg +# +# CONFIG_JSON is the filename of the NPDM config file (.json), relative to the project folder. 
+# If not set, it attempts to use one of the following (in this order): +# - .json +# - config.json +# If a JSON file is provided or autodetected, an ExeFS PFS0 (.nsp) is built instead +# of a homebrew executable (.nro). This is intended to be used for sysmodules. +# NACP building is skipped as well. +#--------------------------------------------------------------------------------- +TARGET := libnanovg +BUILD := build +SOURCES := source source/framework +INCLUDES := include include/nanovg include/nanovg/framework + +#--------------------------------------------------------------------------------- +# options for code generation +#--------------------------------------------------------------------------------- +ARCH := -march=armv8-a+crc+crypto -mtune=cortex-a57 -mtp=soft -fPIE + +CFLAGS := -g -Wall -O2 -ffunction-sections \ + $(ARCH) $(DEFINES) + +CFLAGS += $(INCLUDE) -D__SWITCH__ + +CXXFLAGS := $(CFLAGS) -std=gnu++17 -fno-exceptions -fno-rtti + +ASFLAGS := -g $(ARCH) +LDFLAGS = -specs=$(DEVKITPRO)/libnx/switch.specs -g $(ARCH) -Wl,-Map,$(notdir $*.map) + +#LIBS := -ldeko3dd -lglad -lEGL -lglapi -ldrm_nouveau +LIBS := -ldeko3d + +#--------------------------------------------------------------------------------- +# list of directories containing libraries, this must be the top level containing +# include and lib +#--------------------------------------------------------------------------------- +LIBDIRS := $(PORTLIBS) $(LIBNX) + +#--------------------------------------------------------------------------------- +# no real need to edit anything past this point unless you need to add additional +# rules for different file extensions +#--------------------------------------------------------------------------------- +ifneq ($(BUILD),$(notdir $(CURDIR))) +#--------------------------------------------------------------------------------- + +export OUTPUT := $(CURDIR)/$(TARGET) +export TOPDIR := $(CURDIR) + +export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \ + $(foreach dir,$(DATA),$(CURDIR)/$(dir)) + +export DEPSDIR := $(CURDIR)/$(BUILD) + +CFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.c))) +CPPFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.cpp))) +SFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.s))) +GLSLFILES := $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.glsl))) +BINFILES := $(foreach dir,$(DATA),$(notdir $(wildcard $(dir)/*.*))) + +#--------------------------------------------------------------------------------- +# use CXX for linking C++ projects, CC for standard C +#--------------------------------------------------------------------------------- +ifeq ($(strip $(CPPFILES)),) +#--------------------------------------------------------------------------------- + export LD := $(CC) +#--------------------------------------------------------------------------------- +else +#--------------------------------------------------------------------------------- + export LD := $(CXX) +#--------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------- + +export OFILES_BIN := $(addsuffix .o,$(BINFILES)) +export OFILES_SRC := $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o) +export OFILES := $(OFILES_BIN) $(OFILES_SRC) +export HFILES_BIN := $(addsuffix .h,$(subst .,_,$(BINFILES))) + +export INCLUDE := $(foreach dir,$(INCLUDES),-I$(CURDIR)/$(dir)) \ + $(foreach dir,$(LIBDIRS),-I$(dir)/include) \ + -I$(CURDIR)/$(BUILD) + 
+.PHONY: all clean + +#--------------------------------------------------------------------------------- +all: lib/$(TARGET).a + +lib: + @[ -d $@ ] || mkdir -p $@ + +release: + @[ -d $@ ] || mkdir -p $@ + +lib/$(TARGET).a : lib release $(SOURCES) $(INCLUDES) + @$(MAKE) BUILD=release OUTPUT=$(CURDIR)/$@ \ + BUILD_CFLAGS="-DNDEBUG=1 -O2" \ + DEPSDIR=$(CURDIR)/release \ + --no-print-directory -C release \ + -f $(CURDIR)/Makefile + +dist-bin: all + @tar --exclude=*~ -cjf $(TARGET).tar.bz2 include lib + +dist-src: + @tar --exclude=*~ -cjf $(TARGET)-src.tar.bz2 include source Makefile + +dist: dist-src dist-bin +#--------------------------------------------------------------------------------- +clean: + @echo clean ... + @rm -fr release lib *.bz2 +#--------------------------------------------------------------------------------- +else + +DEPENDS := $(OFILES:.o=.d) + +#--------------------------------------------------------------------------------- +# main targets +#--------------------------------------------------------------------------------- +$(OUTPUT) : $(OFILES) + +$(OFILES) : $(GCH_FILES) + +$(OFILES_SRC) : $(HFILES_BIN) + +#--------------------------------------------------------------------------------- +# you need a rule like this for each extension you use as binary data +#--------------------------------------------------------------------------------- +%.bin.o %_bin.h : %.bin +#--------------------------------------------------------------------------------- + @echo $(notdir $<) + @$(bin2o) + +-include $(DEPENDS) + +#--------------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------------- \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/README.md b/troposphere/daybreak/nanovg/README.md new file mode 100644 index 000000000..96943b459 --- /dev/null +++ b/troposphere/daybreak/nanovg/README.md @@ -0,0 +1,18 @@ +NanoVG for Deko3D +========== + +NanoVG is small antialiased vector graphics rendering library. This is a port to [deko3d](https://github.com/devkitPro/deko3d), a low level 3D graphics API targetting the Nvidia Tegra X1 found inside the Nintendo Switch. + +## Example +An example of using this library can be found [here](https://github.com/Adubbz/nanovg-deko3d-example). + +## License +The library is licensed under [zlib license](LICENSE). + +Dependencies: +- fincs' deko3d framework is licensed under [zlib license](source/framework/LICENSE). + +## Links +The original [nanovg project](https://github.com/memononen/nanovg). +Uses [stb_truetype](http://nothings.org) for font rendering. +Uses [stb_image](http://nothings.org) for image loading. diff --git a/troposphere/daybreak/nanovg/include/nanovg.h b/troposphere/daybreak/nanovg/include/nanovg.h new file mode 100644 index 000000000..646b42424 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg.h @@ -0,0 +1,697 @@ +// +// Copyright (c) 2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. 
If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. +// + +#ifndef NANOVG_H +#define NANOVG_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define NVG_PI 3.14159265358979323846264338327f + +#ifdef _MSC_VER +#pragma warning(push) +#pragma warning(disable: 4201) // nonstandard extension used : nameless struct/union +#endif + +typedef struct NVGcontext NVGcontext; + +struct NVGcolor { + union { + float rgba[4]; + struct { + float r,g,b,a; + }; + }; +}; +typedef struct NVGcolor NVGcolor; + +struct NVGpaint { + float xform[6]; + float extent[2]; + float radius; + float feather; + NVGcolor innerColor; + NVGcolor outerColor; + int image; +}; +typedef struct NVGpaint NVGpaint; + +enum NVGwinding { + NVG_CCW = 1, // Winding for solid shapes + NVG_CW = 2, // Winding for holes +}; + +enum NVGsolidity { + NVG_SOLID = 1, // CCW + NVG_HOLE = 2, // CW +}; + +enum NVGlineCap { + NVG_BUTT, + NVG_ROUND, + NVG_SQUARE, + NVG_BEVEL, + NVG_MITER, +}; + +enum NVGalign { + // Horizontal align + NVG_ALIGN_LEFT = 1<<0, // Default, align text horizontally to left. + NVG_ALIGN_CENTER = 1<<1, // Align text horizontally to center. + NVG_ALIGN_RIGHT = 1<<2, // Align text horizontally to right. + // Vertical align + NVG_ALIGN_TOP = 1<<3, // Align text vertically to top. + NVG_ALIGN_MIDDLE = 1<<4, // Align text vertically to middle. + NVG_ALIGN_BOTTOM = 1<<5, // Align text vertically to bottom. + NVG_ALIGN_BASELINE = 1<<6, // Default, align text vertically to baseline. +}; + +enum NVGblendFactor { + NVG_ZERO = 1<<0, + NVG_ONE = 1<<1, + NVG_SRC_COLOR = 1<<2, + NVG_ONE_MINUS_SRC_COLOR = 1<<3, + NVG_DST_COLOR = 1<<4, + NVG_ONE_MINUS_DST_COLOR = 1<<5, + NVG_SRC_ALPHA = 1<<6, + NVG_ONE_MINUS_SRC_ALPHA = 1<<7, + NVG_DST_ALPHA = 1<<8, + NVG_ONE_MINUS_DST_ALPHA = 1<<9, + NVG_SRC_ALPHA_SATURATE = 1<<10, +}; + +enum NVGcompositeOperation { + NVG_SOURCE_OVER, + NVG_SOURCE_IN, + NVG_SOURCE_OUT, + NVG_ATOP, + NVG_DESTINATION_OVER, + NVG_DESTINATION_IN, + NVG_DESTINATION_OUT, + NVG_DESTINATION_ATOP, + NVG_LIGHTER, + NVG_COPY, + NVG_XOR, +}; + +struct NVGcompositeOperationState { + int srcRGB; + int dstRGB; + int srcAlpha; + int dstAlpha; +}; +typedef struct NVGcompositeOperationState NVGcompositeOperationState; + +struct NVGglyphPosition { + const char* str; // Position of the glyph in the input string. + float x; // The x-coordinate of the logical glyph position. + float minx, maxx; // The bounds of the glyph shape. +}; +typedef struct NVGglyphPosition NVGglyphPosition; + +struct NVGtextRow { + const char* start; // Pointer to the input text where the row starts. + const char* end; // Pointer to the input text where the row ends (one past the last character). + const char* next; // Pointer to the beginning of the next row. + float width; // Logical width of the row. + float minx, maxx; // Actual bounds of the row. Logical with and bounds can differ because of kerning and some parts over extending. +}; +typedef struct NVGtextRow NVGtextRow; + +enum NVGimageFlags { + NVG_IMAGE_GENERATE_MIPMAPS = 1<<0, // Generate mipmaps during creation of the image. + NVG_IMAGE_REPEATX = 1<<1, // Repeat image in X direction. + NVG_IMAGE_REPEATY = 1<<2, // Repeat image in Y direction. 
+ NVG_IMAGE_FLIPY = 1<<3, // Flips (inverses) image in Y direction when rendered. + NVG_IMAGE_PREMULTIPLIED = 1<<4, // Image data has premultiplied alpha. + NVG_IMAGE_NEAREST = 1<<5, // Image interpolation is Nearest instead Linear +}; + +// Begin drawing a new frame +// Calls to nanovg drawing API should be wrapped in nvgBeginFrame() & nvgEndFrame() +// nvgBeginFrame() defines the size of the window to render to in relation currently +// set viewport (i.e. glViewport on GL backends). Device pixel ration allows to +// control the rendering on Hi-DPI devices. +// For example, GLFW returns two dimension for an opened window: window size and +// frame buffer size. In that case you would set windowWidth/Height to the window size +// devicePixelRatio to: frameBufferWidth / windowWidth. +void nvgBeginFrame(NVGcontext* ctx, float windowWidth, float windowHeight, float devicePixelRatio); + +// Cancels drawing the current frame. +void nvgCancelFrame(NVGcontext* ctx); + +// Ends drawing flushing remaining render state. +void nvgEndFrame(NVGcontext* ctx); + +// +// Composite operation +// +// The composite operations in NanoVG are modeled after HTML Canvas API, and +// the blend func is based on OpenGL (see corresponding manuals for more info). +// The colors in the blending state have premultiplied alpha. + +// Sets the composite operation. The op parameter should be one of NVGcompositeOperation. +void nvgGlobalCompositeOperation(NVGcontext* ctx, int op); + +// Sets the composite operation with custom pixel arithmetic. The parameters should be one of NVGblendFactor. +void nvgGlobalCompositeBlendFunc(NVGcontext* ctx, int sfactor, int dfactor); + +// Sets the composite operation with custom pixel arithmetic for RGB and alpha components separately. The parameters should be one of NVGblendFactor. +void nvgGlobalCompositeBlendFuncSeparate(NVGcontext* ctx, int srcRGB, int dstRGB, int srcAlpha, int dstAlpha); + +// +// Color utils +// +// Colors in NanoVG are stored as unsigned ints in ABGR format. + +// Returns a color value from red, green, blue values. Alpha will be set to 255 (1.0f). +NVGcolor nvgRGB(unsigned char r, unsigned char g, unsigned char b); + +// Returns a color value from red, green, blue values. Alpha will be set to 1.0f. +NVGcolor nvgRGBf(float r, float g, float b); + + +// Returns a color value from red, green, blue and alpha values. +NVGcolor nvgRGBA(unsigned char r, unsigned char g, unsigned char b, unsigned char a); + +// Returns a color value from red, green, blue and alpha values. +NVGcolor nvgRGBAf(float r, float g, float b, float a); + + +// Linearly interpolates from color c0 to c1, and returns resulting color value. +NVGcolor nvgLerpRGBA(NVGcolor c0, NVGcolor c1, float u); + +// Sets transparency of a color value. +NVGcolor nvgTransRGBA(NVGcolor c0, unsigned char a); + +// Sets transparency of a color value. +NVGcolor nvgTransRGBAf(NVGcolor c0, float a); + +// Returns color value specified by hue, saturation and lightness. +// HSL values are all in range [0..1], alpha will be set to 255. +NVGcolor nvgHSL(float h, float s, float l); + +// Returns color value specified by hue, saturation and lightness and alpha. +// HSL values are all in range [0..1], alpha in range [0..255] +NVGcolor nvgHSLA(float h, float s, float l, unsigned char a); + +// +// State Handling +// +// NanoVG contains state which represents how paths will be rendered. +// The state contains transform, fill and stroke styles, text and font styles, +// and scissor clipping. 
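The header comments above describe the frame and state-handling flow in prose; purely as orientation, here is a minimal sketch of one frame drawn with functions declared in this header. Creating the NVGcontext itself is backend-specific (for daybreak, the deko3d renderer added later in this patch) and is omitted:

    // Sketch only: draws one rounded rectangle inside a saved/restored render state.
    void DrawBadge(NVGcontext *vg, float window_width, float window_height) {
        nvgBeginFrame(vg, window_width, window_height, 1.0f);  // window size + device pixel ratio

        nvgSave(vg);                      // push the current render state
        nvgTranslate(vg, 40.0f, 40.0f);   // move the origin
        nvgBeginPath(vg);
        nvgRoundedRect(vg, 0.0f, 0.0f, 200.0f, 60.0f, 8.0f);
        nvgFillColor(vg, nvgRGBA(32, 32, 32, 255));
        nvgFill(vg);
        nvgRestore(vg);                   // pop back to the saved state

        nvgEndFrame(vg);                  // flush everything queued this frame
    }
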
+ +// Pushes and saves the current render state into a state stack. +// A matching nvgRestore() must be used to restore the state. +void nvgSave(NVGcontext* ctx); + +// Pops and restores current render state. +void nvgRestore(NVGcontext* ctx); + +// Resets current render state to default values. Does not affect the render state stack. +void nvgReset(NVGcontext* ctx); + +// +// Render styles +// +// Fill and stroke render style can be either a solid color or a paint which is a gradient or a pattern. +// Solid color is simply defined as a color value, different kinds of paints can be created +// using nvgLinearGradient(), nvgBoxGradient(), nvgRadialGradient() and nvgImagePattern(). +// +// Current render style can be saved and restored using nvgSave() and nvgRestore(). + +// Sets whether to draw antialias for nvgStroke() and nvgFill(). It's enabled by default. +void nvgShapeAntiAlias(NVGcontext* ctx, int enabled); + +// Sets current stroke style to a solid color. +void nvgStrokeColor(NVGcontext* ctx, NVGcolor color); + +// Sets current stroke style to a paint, which can be a one of the gradients or a pattern. +void nvgStrokePaint(NVGcontext* ctx, NVGpaint paint); + +// Sets current fill style to a solid color. +void nvgFillColor(NVGcontext* ctx, NVGcolor color); + +// Sets current fill style to a paint, which can be a one of the gradients or a pattern. +void nvgFillPaint(NVGcontext* ctx, NVGpaint paint); + +// Sets the miter limit of the stroke style. +// Miter limit controls when a sharp corner is beveled. +void nvgMiterLimit(NVGcontext* ctx, float limit); + +// Sets the stroke width of the stroke style. +void nvgStrokeWidth(NVGcontext* ctx, float size); + +// Sets how the end of the line (cap) is drawn, +// Can be one of: NVG_BUTT (default), NVG_ROUND, NVG_SQUARE. +void nvgLineCap(NVGcontext* ctx, int cap); + +// Sets how sharp path corners are drawn. +// Can be one of NVG_MITER (default), NVG_ROUND, NVG_BEVEL. +void nvgLineJoin(NVGcontext* ctx, int join); + +// Sets the transparency applied to all rendered shapes. +// Already transparent paths will get proportionally more transparent as well. +void nvgGlobalAlpha(NVGcontext* ctx, float alpha); + +// +// Transforms +// +// The paths, gradients, patterns and scissor region are transformed by an transformation +// matrix at the time when they are passed to the API. +// The current transformation matrix is a affine matrix: +// [sx kx tx] +// [ky sy ty] +// [ 0 0 1] +// Where: sx,sy define scaling, kx,ky skewing, and tx,ty translation. +// The last row is assumed to be 0,0,1 and is not stored. +// +// Apart from nvgResetTransform(), each transformation function first creates +// specific transformation matrix and pre-multiplies the current transformation by it. +// +// Current coordinate system (transformation) can be saved and restored using nvgSave() and nvgRestore(). + +// Resets current transform to a identity matrix. +void nvgResetTransform(NVGcontext* ctx); + +// Premultiplies current coordinate system by specified matrix. +// The parameters are interpreted as matrix as follows: +// [a c e] +// [b d f] +// [0 0 1] +void nvgTransform(NVGcontext* ctx, float a, float b, float c, float d, float e, float f); + +// Translates current coordinate system. +void nvgTranslate(NVGcontext* ctx, float x, float y); + +// Rotates current coordinate system. Angle is specified in radians. +void nvgRotate(NVGcontext* ctx, float angle); + +// Skews the current coordinate system along X axis. Angle is specified in radians. 
+void nvgSkewX(NVGcontext* ctx, float angle); + +// Skews the current coordinate system along Y axis. Angle is specified in radians. +void nvgSkewY(NVGcontext* ctx, float angle); + +// Scales the current coordinate system. +void nvgScale(NVGcontext* ctx, float x, float y); + +// Stores the top part (a-f) of the current transformation matrix in to the specified buffer. +// [a c e] +// [b d f] +// [0 0 1] +// There should be space for 6 floats in the return buffer for the values a-f. +void nvgCurrentTransform(NVGcontext* ctx, float* xform); + + +// The following functions can be used to make calculations on 2x3 transformation matrices. +// A 2x3 matrix is represented as float[6]. + +// Sets the transform to identity matrix. +void nvgTransformIdentity(float* dst); + +// Sets the transform to translation matrix matrix. +void nvgTransformTranslate(float* dst, float tx, float ty); + +// Sets the transform to scale matrix. +void nvgTransformScale(float* dst, float sx, float sy); + +// Sets the transform to rotate matrix. Angle is specified in radians. +void nvgTransformRotate(float* dst, float a); + +// Sets the transform to skew-x matrix. Angle is specified in radians. +void nvgTransformSkewX(float* dst, float a); + +// Sets the transform to skew-y matrix. Angle is specified in radians. +void nvgTransformSkewY(float* dst, float a); + +// Sets the transform to the result of multiplication of two transforms, of A = A*B. +void nvgTransformMultiply(float* dst, const float* src); + +// Sets the transform to the result of multiplication of two transforms, of A = B*A. +void nvgTransformPremultiply(float* dst, const float* src); + +// Sets the destination to inverse of specified transform. +// Returns 1 if the inverse could be calculated, else 0. +int nvgTransformInverse(float* dst, const float* src); + +// Transform a point by given transform. +void nvgTransformPoint(float* dstx, float* dsty, const float* xform, float srcx, float srcy); + +// Converts degrees to radians and vice versa. +float nvgDegToRad(float deg); +float nvgRadToDeg(float rad); + +// +// Images +// +// NanoVG allows you to load jpg, png, psd, tga, pic and gif files to be used for rendering. +// In addition you can upload your own image. The image loading is provided by stb_image. +// The parameter imageFlags is combination of flags defined in NVGimageFlags. + +// Creates image by loading it from the disk from specified file name. +// Returns handle to the image. +int nvgCreateImage(NVGcontext* ctx, const char* filename, int imageFlags); + +// Creates image by loading it from the specified chunk of memory. +// Returns handle to the image. +int nvgCreateImageMem(NVGcontext* ctx, int imageFlags, unsigned char* data, int ndata); + +// Creates image from specified image data. +// Returns handle to the image. +int nvgCreateImageRGBA(NVGcontext* ctx, int w, int h, int imageFlags, const unsigned char* data); + +// Updates image data specified by image handle. +void nvgUpdateImage(NVGcontext* ctx, int image, const unsigned char* data); + +// Returns the dimensions of a created image. +void nvgImageSize(NVGcontext* ctx, int image, int* w, int* h); + +// Deletes created image. +void nvgDeleteImage(NVGcontext* ctx, int image); + +// +// Paints +// +// NanoVG supports four types of paints: linear gradient, box gradient, radial gradient and image pattern. +// These can be used as paints for strokes and fills. + +// Creates and returns a linear gradient. 
Parameters (sx,sy)-(ex,ey) specify the start and end coordinates +// of the linear gradient, icol specifies the start color and ocol the end color. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). +NVGpaint nvgLinearGradient(NVGcontext* ctx, float sx, float sy, float ex, float ey, + NVGcolor icol, NVGcolor ocol); + +// Creates and returns a box gradient. Box gradient is a feathered rounded rectangle, it is useful for rendering +// drop shadows or highlights for boxes. Parameters (x,y) define the top-left corner of the rectangle, +// (w,h) define the size of the rectangle, r defines the corner radius, and f feather. Feather defines how blurry +// the border of the rectangle is. Parameter icol specifies the inner color and ocol the outer color of the gradient. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). +NVGpaint nvgBoxGradient(NVGcontext* ctx, float x, float y, float w, float h, + float r, float f, NVGcolor icol, NVGcolor ocol); + +// Creates and returns a radial gradient. Parameters (cx,cy) specify the center, inr and outr specify +// the inner and outer radius of the gradient, icol specifies the start color and ocol the end color. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). +NVGpaint nvgRadialGradient(NVGcontext* ctx, float cx, float cy, float inr, float outr, + NVGcolor icol, NVGcolor ocol); + +// Creates and returns an image patter. Parameters (ox,oy) specify the left-top location of the image pattern, +// (ex,ey) the size of one image, angle rotation around the top-left corner, image is handle to the image to render. +// The gradient is transformed by the current transform when it is passed to nvgFillPaint() or nvgStrokePaint(). +NVGpaint nvgImagePattern(NVGcontext* ctx, float ox, float oy, float ex, float ey, + float angle, int image, float alpha); + +// +// Scissoring +// +// Scissoring allows you to clip the rendering into a rectangle. This is useful for various +// user interface cases like rendering a text edit or a timeline. + +// Sets the current scissor rectangle. +// The scissor rectangle is transformed by the current transform. +void nvgScissor(NVGcontext* ctx, float x, float y, float w, float h); + +// Intersects current scissor rectangle with the specified rectangle. +// The scissor rectangle is transformed by the current transform. +// Note: in case the rotation of previous scissor rect differs from +// the current one, the intersection will be done between the specified +// rectangle and the previous scissor rectangle transformed in the current +// transform space. The resulting shape is always rectangle. +void nvgIntersectScissor(NVGcontext* ctx, float x, float y, float w, float h); + +// Reset and disables scissoring. +void nvgResetScissor(NVGcontext* ctx); + +// +// Paths +// +// Drawing a new shape starts with nvgBeginPath(), it clears all the currently defined paths. +// Then you define one or more paths and sub-paths which describe the shape. The are functions +// to draw common shapes like rectangles and circles, and lower level step-by-step functions, +// which allow to define a path curve by curve. +// +// NanoVG uses even-odd fill rule to draw the shapes. Solid shapes should have counter clockwise +// winding and holes should have counter clockwise order. To specify winding of a path you can +// call nvgPathWinding(). 
This is useful especially for the common shapes, which are drawn CCW. +// +// Finally you can fill the path using current fill style by calling nvgFill(), and stroke it +// with current stroke style by calling nvgStroke(). +// +// The curve segments and sub-paths are transformed by the current transform. + +// Clears the current path and sub-paths. +void nvgBeginPath(NVGcontext* ctx); + +// Starts new sub-path with specified point as first point. +void nvgMoveTo(NVGcontext* ctx, float x, float y); + +// Adds line segment from the last point in the path to the specified point. +void nvgLineTo(NVGcontext* ctx, float x, float y); + +// Adds cubic bezier segment from last point in the path via two control points to the specified point. +void nvgBezierTo(NVGcontext* ctx, float c1x, float c1y, float c2x, float c2y, float x, float y); + +// Adds quadratic bezier segment from last point in the path via a control point to the specified point. +void nvgQuadTo(NVGcontext* ctx, float cx, float cy, float x, float y); + +// Adds an arc segment at the corner defined by the last path point, and two specified points. +void nvgArcTo(NVGcontext* ctx, float x1, float y1, float x2, float y2, float radius); + +// Closes current sub-path with a line segment. +void nvgClosePath(NVGcontext* ctx); + +// Sets the current sub-path winding, see NVGwinding and NVGsolidity. +void nvgPathWinding(NVGcontext* ctx, int dir); + +// Creates new circle arc shaped sub-path. The arc center is at cx,cy, the arc radius is r, +// and the arc is drawn from angle a0 to a1, and swept in direction dir (NVG_CCW, or NVG_CW). +// Angles are specified in radians. +void nvgArc(NVGcontext* ctx, float cx, float cy, float r, float a0, float a1, int dir); + +// Creates new rectangle shaped sub-path. +void nvgRect(NVGcontext* ctx, float x, float y, float w, float h); + +// Creates new rounded rectangle shaped sub-path. +void nvgRoundedRect(NVGcontext* ctx, float x, float y, float w, float h, float r); + +// Creates new rounded rectangle shaped sub-path with varying radii for each corner. +void nvgRoundedRectVarying(NVGcontext* ctx, float x, float y, float w, float h, float radTopLeft, float radTopRight, float radBottomRight, float radBottomLeft); + +// Creates new ellipse shaped sub-path. +void nvgEllipse(NVGcontext* ctx, float cx, float cy, float rx, float ry); + +// Creates new circle shaped sub-path. +void nvgCircle(NVGcontext* ctx, float cx, float cy, float r); + +// Fills the current path with current fill style. +void nvgFill(NVGcontext* ctx); + +// Fills the current path with current stroke style. +void nvgStroke(NVGcontext* ctx); + + +// +// Text +// +// NanoVG allows you to load .ttf files and use the font to render text. +// +// The appearance of the text can be defined by setting the current text style +// and by specifying the fill color. Common text and font settings such as +// font size, letter spacing and text align are supported. Font blur allows you +// to create simple text effects such as drop shadows. +// +// At render time the font face can be set based on the font handles or name. +// +// Font measure functions return values in local space, the calculations are +// carried in the same resolution as the final rendering. This is done because +// the text glyph positions are snapped to the nearest pixels sharp rendering. +// +// The local space means that values are not rotated or scale as per the current +// transformation. 
For example if you set font size to 12, which would mean that +// line height is 16, then regardless of the current scaling and rotation, the +// returned line height is always 16. Some measures may vary because of the scaling +// since aforementioned pixel snapping. +// +// While this may sound a little odd, the setup allows you to always render the +// same way regardless of scaling. I.e. following works regardless of scaling: +// +// const char* txt = "Text me up."; +// nvgTextBounds(vg, x,y, txt, NULL, bounds); +// nvgBeginPath(vg); +// nvgRoundedRect(vg, bounds[0],bounds[1], bounds[2]-bounds[0], bounds[3]-bounds[1]); +// nvgFill(vg); +// +// Note: currently only solid color fill is supported for text. + +// Creates font by loading it from the disk from specified file name. +// Returns handle to the font. +int nvgCreateFont(NVGcontext* ctx, const char* name, const char* filename); + +// fontIndex specifies which font face to load from a .ttf/.ttc file. +int nvgCreateFontAtIndex(NVGcontext* ctx, const char* name, const char* filename, const int fontIndex); + +// Creates font by loading it from the specified memory chunk. +// Returns handle to the font. +int nvgCreateFontMem(NVGcontext* ctx, const char* name, unsigned char* data, int ndata, int freeData); + +// fontIndex specifies which font face to load from a .ttf/.ttc file. +int nvgCreateFontMemAtIndex(NVGcontext* ctx, const char* name, unsigned char* data, int ndata, int freeData, const int fontIndex); + +// Finds a loaded font of specified name, and returns handle to it, or -1 if the font is not found. +int nvgFindFont(NVGcontext* ctx, const char* name); + +// Adds a fallback font by handle. +int nvgAddFallbackFontId(NVGcontext* ctx, int baseFont, int fallbackFont); + +// Adds a fallback font by name. +int nvgAddFallbackFont(NVGcontext* ctx, const char* baseFont, const char* fallbackFont); + +// Resets fallback fonts by handle. +void nvgResetFallbackFontsId(NVGcontext* ctx, int baseFont); + +// Resets fallback fonts by name. +void nvgResetFallbackFonts(NVGcontext* ctx, const char* baseFont); + +// Sets the font size of current text style. +void nvgFontSize(NVGcontext* ctx, float size); + +// Sets the blur of current text style. +void nvgFontBlur(NVGcontext* ctx, float blur); + +// Sets the letter spacing of current text style. +void nvgTextLetterSpacing(NVGcontext* ctx, float spacing); + +// Sets the proportional line height of current text style. The line height is specified as multiple of font size. +void nvgTextLineHeight(NVGcontext* ctx, float lineHeight); + +// Sets the text align of current text style, see NVGalign for options. +void nvgTextAlign(NVGcontext* ctx, int align); + +// Sets the font face based on specified id of current text style. +void nvgFontFaceId(NVGcontext* ctx, int font); + +// Sets the font face based on specified name of current text style. +void nvgFontFace(NVGcontext* ctx, const char* font); + +// Draws text string at specified location. If end is specified only the sub-string up to the end is drawn. +float nvgText(NVGcontext* ctx, float x, float y, const char* string, const char* end); + +// Draws multi-line text string at specified location wrapped at the specified width. If end is specified only the sub-string up to the end is drawn. +// White space is stripped at the beginning of the rows, the text is split at word boundaries or when new-line characters are encountered. +// Words longer than the max width are slit at nearest character (i.e. no hyphenation). 
+void nvgTextBox(NVGcontext* ctx, float x, float y, float breakRowWidth, const char* string, const char* end); + +// Measures the specified text string. Parameter bounds should be a pointer to float[4], +// if the bounding box of the text should be returned. The bounds value are [xmin,ymin, xmax,ymax] +// Returns the horizontal advance of the measured text (i.e. where the next character should drawn). +// Measured values are returned in local coordinate space. +float nvgTextBounds(NVGcontext* ctx, float x, float y, const char* string, const char* end, float* bounds); + +// Measures the specified multi-text string. Parameter bounds should be a pointer to float[4], +// if the bounding box of the text should be returned. The bounds value are [xmin,ymin, xmax,ymax] +// Measured values are returned in local coordinate space. +void nvgTextBoxBounds(NVGcontext* ctx, float x, float y, float breakRowWidth, const char* string, const char* end, float* bounds); + +// Calculates the glyph x positions of the specified text. If end is specified only the sub-string will be used. +// Measured values are returned in local coordinate space. +int nvgTextGlyphPositions(NVGcontext* ctx, float x, float y, const char* string, const char* end, NVGglyphPosition* positions, int maxPositions); + +// Returns the vertical metrics based on the current text style. +// Measured values are returned in local coordinate space. +void nvgTextMetrics(NVGcontext* ctx, float* ascender, float* descender, float* lineh); + +// Breaks the specified text into lines. If end is specified only the sub-string will be used. +// White space is stripped at the beginning of the rows, the text is split at word boundaries or when new-line characters are encountered. +// Words longer than the max width are slit at nearest character (i.e. no hyphenation). 
+int nvgTextBreakLines(NVGcontext* ctx, const char* string, const char* end, float breakRowWidth, NVGtextRow* rows, int maxRows); + +// +// Internal Render API +// +enum NVGtexture { + NVG_TEXTURE_ALPHA = 0x01, + NVG_TEXTURE_RGBA = 0x02, +}; + +struct NVGscissor { + float xform[6]; + float extent[2]; +}; +typedef struct NVGscissor NVGscissor; + +struct NVGvertex { + float x,y,u,v; +}; +typedef struct NVGvertex NVGvertex; + +struct NVGpath { + int first; + int count; + unsigned char closed; + int nbevel; + NVGvertex* fill; + int nfill; + NVGvertex* stroke; + int nstroke; + int winding; + int convex; +}; +typedef struct NVGpath NVGpath; + +struct NVGparams { + void* userPtr; + int edgeAntiAlias; + int (*renderCreate)(void* uptr); + int (*renderCreateTexture)(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data); + int (*renderDeleteTexture)(void* uptr, int image); + int (*renderUpdateTexture)(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data); + int (*renderGetTextureSize)(void* uptr, int image, int* w, int* h); + void (*renderViewport)(void* uptr, float width, float height, float devicePixelRatio); + void (*renderCancel)(void* uptr); + void (*renderFlush)(void* uptr); + void (*renderFill)(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe, const float* bounds, const NVGpath* paths, int npaths); + void (*renderStroke)(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe, float strokeWidth, const NVGpath* paths, int npaths); + void (*renderTriangles)(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, const NVGvertex* verts, int nverts, float fringe); + void (*renderDelete)(void* uptr); +}; +typedef struct NVGparams NVGparams; + +// Constructor and destructor, called by the render back-end. +NVGcontext* nvgCreateInternal(NVGparams* params); +void nvgDeleteInternal(NVGcontext* ctx); + +NVGparams* nvgInternalParams(NVGcontext* ctx); + +// Debug function to dump cached path data. +void nvgDebugDumpPathCache(NVGcontext* ctx); + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +#define NVG_NOTUSED(v) for (;;) { (void)(1 ? (void)0 : ( (void)(v) ) ); break; } + +#ifdef __cplusplus +} +#endif + +#endif // NANOVG_H diff --git a/troposphere/daybreak/nanovg/include/nanovg/dk_renderer.hpp b/troposphere/daybreak/nanovg/include/nanovg/dk_renderer.hpp new file mode 100644 index 000000000..d1dbb853e --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/dk_renderer.hpp @@ -0,0 +1,207 @@ +#pragma once + +#include +#include +#include +#include + +#include "framework/CDescriptorSet.h" +#include "framework/CMemPool.h" +#include "framework/CShader.h" +#include "framework/CCmdMemRing.h" +#include "nanovg.h" + +// Create flags +enum NVGcreateFlags { + // Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA). + NVG_ANTIALIAS = 1<<0, + // Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little + // slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once. + NVG_STENCIL_STROKES = 1<<1, + // Flag indicating that additional debug checks are done. 
+ NVG_DEBUG = 1<<2, +}; + +enum DKNVGuniformLoc +{ + DKNVG_LOC_VIEWSIZE, + DKNVG_LOC_TEX, + DKNVG_LOC_FRAG, + DKNVG_MAX_LOCS +}; + +enum VKNVGshaderType { + NSVG_SHADER_FILLGRAD, + NSVG_SHADER_FILLIMG, + NSVG_SHADER_SIMPLE, + NSVG_SHADER_IMG +}; + +struct DKNVGtextureDescriptor { + int width, height; + int type; + int flags; +}; + +struct DKNVGblend { + int srcRGB; + int dstRGB; + int srcAlpha; + int dstAlpha; +}; + +enum DKNVGcallType { + DKNVG_NONE = 0, + DKNVG_FILL, + DKNVG_CONVEXFILL, + DKNVG_STROKE, + DKNVG_TRIANGLES, +}; + +struct DKNVGcall { + int type; + int image; + int pathOffset; + int pathCount; + int triangleOffset; + int triangleCount; + int uniformOffset; + DKNVGblend blendFunc; +}; + +struct DKNVGpath { + int fillOffset; + int fillCount; + int strokeOffset; + int strokeCount; +}; + +struct DKNVGfragUniforms { + float scissorMat[12]; // matrices are actually 3 vec4s + float paintMat[12]; + struct NVGcolor innerCol; + struct NVGcolor outerCol; + float scissorExt[2]; + float scissorScale[2]; + float extent[2]; + float radius; + float feather; + float strokeMult; + float strokeThr; + int texType; + int type; +}; + +namespace nvg { + class DkRenderer; +} + +struct DKNVGcontext { + nvg::DkRenderer *renderer; + float view[2]; + int fragSize; + int flags; + // Per frame buffers + DKNVGcall* calls; + int ccalls; + int ncalls; + DKNVGpath* paths; + int cpaths; + int npaths; + struct NVGvertex* verts; + int cverts; + int nverts; + unsigned char* uniforms; + int cuniforms; + int nuniforms; +}; + +namespace nvg { + + class Texture { + private: + const int m_id; + dk::Image m_image; + dk::ImageDescriptor m_image_descriptor; + CMemPool::Handle m_image_mem; + DKNVGtextureDescriptor m_texture_descriptor; + public: + Texture(int id); + ~Texture(); + + void Initialize(CMemPool &image_pool, CMemPool &scratch_pool, dk::Device device, dk::Queue transfer_queue, int type, int w, int h, int image_flags, const u8 *data); + void Update(CMemPool &image_pool, CMemPool &scratch_pool, dk::Device device, dk::Queue transfer_queue, int type, int w, int h, int image_flags, const u8 *data); + + int GetId(); + const DKNVGtextureDescriptor &GetDescriptor(); + + dk::Image &GetImage(); + dk::ImageDescriptor &GetImageDescriptor(); + }; + + class DkRenderer { + private: + enum SamplerType : u8 { + SamplerType_MipFilter = 1 << 0, + SamplerType_Nearest = 1 << 1, + SamplerType_RepeatX = 1 << 2, + SamplerType_RepeatY = 1 << 3, + SamplerType_Total = 0x10, + }; + private: + static constexpr size_t DynamicCmdSize = 0x20000; + static constexpr size_t FragmentUniformSize = sizeof(DKNVGfragUniforms) + 4 - sizeof(DKNVGfragUniforms) % 4; + static constexpr size_t MaxImages = 0x1000; + + /* From the application. */ + u32 m_view_width; + u32 m_view_height; + dk::Device m_device; + dk::Queue m_queue; + CMemPool &m_image_mem_pool; + CMemPool &m_code_mem_pool; + CMemPool &m_data_mem_pool; + + /* State. 
*/ + dk::UniqueCmdBuf m_dyn_cmd_buf; + CCmdMemRing<1> m_dyn_cmd_mem; + std::optional m_vertex_buffer; + CShader m_vertex_shader; + CShader m_fragment_shader; + CMemPool::Handle m_view_uniform_buffer; + CMemPool::Handle m_frag_uniform_buffer; + + u32 m_next_texture_id = 1; + std::vector> m_textures; + CDescriptorSet m_image_descriptor_set; + CDescriptorSet m_sampler_descriptor_set; + std::array m_image_descriptor_mappings; + int m_last_image_descriptor = 0; + + int AcquireImageDescriptor(std::shared_ptr texture, int image); + void FreeImageDescriptor(int image); + void SetUniforms(const DKNVGcontext &ctx, int offset, int image); + + void UpdateVertexBuffer(const void *data, size_t size); + + void DrawFill(const DKNVGcontext &ctx, const DKNVGcall &call); + void DrawConvexFill(const DKNVGcontext &ctx, const DKNVGcall &call); + void DrawStroke(const DKNVGcontext &ctx, const DKNVGcall &call); + void DrawTriangles(const DKNVGcontext &ctx, const DKNVGcall &call); + + std::shared_ptr FindTexture(int id); + public: + DkRenderer(unsigned int view_width, unsigned int view_height, dk::Device device, dk::Queue queue, CMemPool &image_mem_pool, CMemPool &code_mem_pool, CMemPool &data_mem_pool); + ~DkRenderer(); + + int Create(DKNVGcontext &ctx); + int CreateTexture(const DKNVGcontext &ctx, int type, int w, int h, int image_flags, const u8 *data); + int DeleteTexture(const DKNVGcontext &ctx, int id); + int UpdateTexture(const DKNVGcontext &ctx, int id, int x, int y, int w, int h, const u8 *data); + int GetTextureSize(const DKNVGcontext &ctx, int id, int *w, int *h); + const DKNVGtextureDescriptor *GetTextureDescriptor(const DKNVGcontext &ctx, int id); + + void Flush(DKNVGcontext &ctx); + }; + +} \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/include/nanovg/fontstash.h b/troposphere/daybreak/nanovg/include/nanovg/fontstash.h new file mode 100644 index 000000000..e7da6019b --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/fontstash.h @@ -0,0 +1,1785 @@ +// +// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. +// + +#ifndef FONS_H +#define FONS_H + +#define FONS_INVALID -1 + +enum FONSflags { + FONS_ZERO_TOPLEFT = 1, + FONS_ZERO_BOTTOMLEFT = 2, +}; + +enum FONSalign { + // Horizontal align + FONS_ALIGN_LEFT = 1<<0, // Default + FONS_ALIGN_CENTER = 1<<1, + FONS_ALIGN_RIGHT = 1<<2, + // Vertical align + FONS_ALIGN_TOP = 1<<3, + FONS_ALIGN_MIDDLE = 1<<4, + FONS_ALIGN_BOTTOM = 1<<5, + FONS_ALIGN_BASELINE = 1<<6, // Default +}; + +enum FONSglyphBitmap { + FONS_GLYPH_BITMAP_OPTIONAL = 1, + FONS_GLYPH_BITMAP_REQUIRED = 2, +}; + +enum FONSerrorCode { + // Font atlas is full. 
+ FONS_ATLAS_FULL = 1, + // Scratch memory used to render glyphs is full; the requested size is reported in 'val', and you may need to bump up FONS_SCRATCH_BUF_SIZE. + FONS_SCRATCH_FULL = 2, + // Calls to fonsPushState have created too large a stack; if you need a deeper state stack, bump up FONS_MAX_STATES. + FONS_STATES_OVERFLOW = 3, + // Trying to pop too many states with fonsPopState(). + FONS_STATES_UNDERFLOW = 4, +}; + +struct FONSparams { + int width, height; + unsigned char flags; + void* userPtr; + int (*renderCreate)(void* uptr, int width, int height); + int (*renderResize)(void* uptr, int width, int height); + void (*renderUpdate)(void* uptr, int* rect, const unsigned char* data); + void (*renderDraw)(void* uptr, const float* verts, const float* tcoords, const unsigned int* colors, int nverts); + void (*renderDelete)(void* uptr); +}; +typedef struct FONSparams FONSparams; + +struct FONSquad +{ + float x0,y0,s0,t0; + float x1,y1,s1,t1; +}; +typedef struct FONSquad FONSquad; + +struct FONStextIter { + float x, y, nextx, nexty, scale, spacing; + unsigned int codepoint; + short isize, iblur; + struct FONSfont* font; + int prevGlyphIndex; + const char* str; + const char* next; + const char* end; + unsigned int utf8state; + int bitmapOption; +}; +typedef struct FONStextIter FONStextIter; + +typedef struct FONScontext FONScontext; + +// Constructor and destructor. +FONScontext* fonsCreateInternal(FONSparams* params); +void fonsDeleteInternal(FONScontext* s); + +void fonsSetErrorCallback(FONScontext* s, void (*callback)(void* uptr, int error, int val), void* uptr); +// Returns current atlas size. +void fonsGetAtlasSize(FONScontext* s, int* width, int* height); +// Expands the atlas size. +int fonsExpandAtlas(FONScontext* s, int width, int height); +// Resets the whole stash.
+int fonsResetAtlas(FONScontext* stash, int width, int height); + +// Add fonts +int fonsAddFont(FONScontext* s, const char* name, const char* path, int fontIndex); +int fonsAddFontMem(FONScontext* s, const char* name, unsigned char* data, int ndata, int freeData, int fontIndex); +int fonsGetFontByName(FONScontext* s, const char* name); + +// State handling +void fonsPushState(FONScontext* s); +void fonsPopState(FONScontext* s); +void fonsClearState(FONScontext* s); + +// State setting +void fonsSetSize(FONScontext* s, float size); +void fonsSetColor(FONScontext* s, unsigned int color); +void fonsSetSpacing(FONScontext* s, float spacing); +void fonsSetBlur(FONScontext* s, float blur); +void fonsSetAlign(FONScontext* s, int align); +void fonsSetFont(FONScontext* s, int font); + +// Draw text +float fonsDrawText(FONScontext* s, float x, float y, const char* string, const char* end); + +// Measure text +float fonsTextBounds(FONScontext* s, float x, float y, const char* string, const char* end, float* bounds); +void fonsLineBounds(FONScontext* s, float y, float* miny, float* maxy); +void fonsVertMetrics(FONScontext* s, float* ascender, float* descender, float* lineh); + +// Text iterator +int fonsTextIterInit(FONScontext* stash, FONStextIter* iter, float x, float y, const char* str, const char* end, int bitmapOption); +int fonsTextIterNext(FONScontext* stash, FONStextIter* iter, struct FONSquad* quad); + +// Pull texture changes +const unsigned char* fonsGetTextureData(FONScontext* stash, int* width, int* height); +int fonsValidateTexture(FONScontext* s, int* dirty); + +// Draws the stash texture for debugging +void fonsDrawDebug(FONScontext* s, float x, float y); + +#endif // FONTSTASH_H + + +#ifdef FONTSTASH_IMPLEMENTATION + +#define FONS_NOTUSED(v) (void)sizeof(v) + +#ifdef FONS_USE_FREETYPE + +#include +#include FT_FREETYPE_H +#include FT_ADVANCES_H +#include + +struct FONSttFontImpl { + FT_Face font; +}; +typedef struct FONSttFontImpl FONSttFontImpl; + +static FT_Library ftLibrary; + +int fons__tt_init(FONScontext *context) +{ + FT_Error ftError; + FONS_NOTUSED(context); + ftError = FT_Init_FreeType(&ftLibrary); + return ftError == 0; +} + +int fons__tt_done(FONScontext *context) +{ + FT_Error ftError; + FONS_NOTUSED(context); + ftError = FT_Done_FreeType(ftLibrary); + return ftError == 0; +} + +int fons__tt_loadFont(FONScontext *context, FONSttFontImpl *font, unsigned char *data, int dataSize, int fontIndex) +{ + FT_Error ftError; + FONS_NOTUSED(context); + + //font->font.userdata = stash; + ftError = FT_New_Memory_Face(ftLibrary, (const FT_Byte*)data, dataSize, fontIndex, &font->font); + return ftError == 0; +} + +void fons__tt_getFontVMetrics(FONSttFontImpl *font, int *ascent, int *descent, int *lineGap) +{ + *ascent = font->font->ascender; + *descent = font->font->descender; + *lineGap = font->font->height - (*ascent - *descent); +} + +float fons__tt_getPixelHeightScale(FONSttFontImpl *font, float size) +{ + return size / font->font->units_per_EM; +} + +int fons__tt_getGlyphIndex(FONSttFontImpl *font, int codepoint) +{ + return FT_Get_Char_Index(font->font, codepoint); +} + +int fons__tt_buildGlyphBitmap(FONSttFontImpl *font, int glyph, float size, float scale, + int *advance, int *lsb, int *x0, int *y0, int *x1, int *y1) +{ + FT_Error ftError; + FT_GlyphSlot ftGlyph; + FT_Fixed advFixed; + FONS_NOTUSED(scale); + + ftError = FT_Set_Pixel_Sizes(font->font, 0, size); + if (ftError) return 0; + ftError = FT_Load_Glyph(font->font, glyph, FT_LOAD_RENDER | FT_LOAD_FORCE_AUTOHINT); + if 
(ftError) return 0; + ftError = FT_Get_Advance(font->font, glyph, FT_LOAD_NO_SCALE, &advFixed); + if (ftError) return 0; + ftGlyph = font->font->glyph; + *advance = (int)advFixed; + *lsb = (int)ftGlyph->metrics.horiBearingX; + *x0 = ftGlyph->bitmap_left; + *x1 = *x0 + ftGlyph->bitmap.width; + *y0 = -ftGlyph->bitmap_top; + *y1 = *y0 + ftGlyph->bitmap.rows; + return 1; +} + +void fons__tt_renderGlyphBitmap(FONSttFontImpl *font, unsigned char *output, int outWidth, int outHeight, int outStride, + float scaleX, float scaleY, int glyph) +{ + FT_GlyphSlot ftGlyph = font->font->glyph; + int ftGlyphOffset = 0; + unsigned int x, y; + FONS_NOTUSED(outWidth); + FONS_NOTUSED(outHeight); + FONS_NOTUSED(scaleX); + FONS_NOTUSED(scaleY); + FONS_NOTUSED(glyph); // glyph has already been loaded by fons__tt_buildGlyphBitmap + + for ( y = 0; y < ftGlyph->bitmap.rows; y++ ) { + for ( x = 0; x < ftGlyph->bitmap.width; x++ ) { + output[(y * outStride) + x] = ftGlyph->bitmap.buffer[ftGlyphOffset++]; + } + } +} + +int fons__tt_getGlyphKernAdvance(FONSttFontImpl *font, int glyph1, int glyph2) +{ + FT_Vector ftKerning; + FT_Get_Kerning(font->font, glyph1, glyph2, FT_KERNING_DEFAULT, &ftKerning); + return (int)((ftKerning.x + 32) >> 6); // Round up and convert to integer +} + +#else + +#define STB_TRUETYPE_IMPLEMENTATION +static void* fons__tmpalloc(size_t size, void* up); +static void fons__tmpfree(void* ptr, void* up); +#define STBTT_malloc(x,u) fons__tmpalloc(x,u) +#define STBTT_free(x,u) fons__tmpfree(x,u) +#include "stb_truetype.h" + +struct FONSttFontImpl { + stbtt_fontinfo font; +}; +typedef struct FONSttFontImpl FONSttFontImpl; + +int fons__tt_init(FONScontext *context) +{ + FONS_NOTUSED(context); + return 1; +} + +int fons__tt_done(FONScontext *context) +{ + FONS_NOTUSED(context); + return 1; +} + +int fons__tt_loadFont(FONScontext *context, FONSttFontImpl *font, unsigned char *data, int dataSize, int fontIndex) +{ + int offset, stbError; + FONS_NOTUSED(dataSize); + + font->font.userdata = context; + offset = stbtt_GetFontOffsetForIndex(data, fontIndex); + if (offset == -1) { + stbError = 0; + } else { + stbError = stbtt_InitFont(&font->font, data, offset); + } + return stbError; +} + +void fons__tt_getFontVMetrics(FONSttFontImpl *font, int *ascent, int *descent, int *lineGap) +{ + stbtt_GetFontVMetrics(&font->font, ascent, descent, lineGap); +} + +float fons__tt_getPixelHeightScale(FONSttFontImpl *font, float size) +{ + return stbtt_ScaleForMappingEmToPixels(&font->font, size); +} + +int fons__tt_getGlyphIndex(FONSttFontImpl *font, int codepoint) +{ + return stbtt_FindGlyphIndex(&font->font, codepoint); +} + +int fons__tt_buildGlyphBitmap(FONSttFontImpl *font, int glyph, float size, float scale, + int *advance, int *lsb, int *x0, int *y0, int *x1, int *y1) +{ + FONS_NOTUSED(size); + stbtt_GetGlyphHMetrics(&font->font, glyph, advance, lsb); + stbtt_GetGlyphBitmapBox(&font->font, glyph, scale, scale, x0, y0, x1, y1); + return 1; +} + +void fons__tt_renderGlyphBitmap(FONSttFontImpl *font, unsigned char *output, int outWidth, int outHeight, int outStride, + float scaleX, float scaleY, int glyph) +{ + stbtt_MakeGlyphBitmap(&font->font, output, outWidth, outHeight, outStride, scaleX, scaleY, glyph); +} + +int fons__tt_getGlyphKernAdvance(FONSttFontImpl *font, int glyph1, int glyph2) +{ + return stbtt_GetGlyphKernAdvance(&font->font, glyph1, glyph2); +} + +#endif + +#ifndef FONS_SCRATCH_BUF_SIZE +# define FONS_SCRATCH_BUF_SIZE 96000 +#endif +#ifndef FONS_HASH_LUT_SIZE +# define FONS_HASH_LUT_SIZE 256 +#endif 
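+// Illustrative note (not part of the upstream fontstash header): the FONS_* limits in this block are compile-time tunables guarded by #ifndef, so an application can override them before the FONTSTASH_IMPLEMENTATION include, e.g. if the FONS_SCRATCH_FULL or FONS_STATES_OVERFLOW errors are reported via the error callback: +// #define FONS_SCRATCH_BUF_SIZE 128000 // larger glyph-rasterization scratch buffer (example value) +// #define FONS_MAX_STATES 40 // deeper fonsPushState/fonsPopState stack (example value) +// #define FONTSTASH_IMPLEMENTATION +// #include "fontstash.h"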
+#ifndef FONS_INIT_FONTS +# define FONS_INIT_FONTS 4 +#endif +#ifndef FONS_INIT_GLYPHS +# define FONS_INIT_GLYPHS 256 +#endif +#ifndef FONS_INIT_ATLAS_NODES +# define FONS_INIT_ATLAS_NODES 256 +#endif +#ifndef FONS_VERTEX_COUNT +# define FONS_VERTEX_COUNT 1024 +#endif +#ifndef FONS_MAX_STATES +# define FONS_MAX_STATES 20 +#endif +#ifndef FONS_MAX_FALLBACKS +# define FONS_MAX_FALLBACKS 20 +#endif + +static unsigned int fons__hashint(unsigned int a) +{ + a += ~(a<<15); + a ^= (a>>10); + a += (a<<3); + a ^= (a>>6); + a += ~(a<<11); + a ^= (a>>16); + return a; +} + +static int fons__mini(int a, int b) +{ + return a < b ? a : b; +} + +static int fons__maxi(int a, int b) +{ + return a > b ? a : b; +} + +struct FONSglyph +{ + unsigned int codepoint; + int index; + int next; + short size, blur; + short x0,y0,x1,y1; + short xadv,xoff,yoff; +}; +typedef struct FONSglyph FONSglyph; + +struct FONSfont +{ + FONSttFontImpl font; + char name[64]; + unsigned char* data; + int dataSize; + unsigned char freeData; + float ascender; + float descender; + float lineh; + FONSglyph* glyphs; + int cglyphs; + int nglyphs; + int lut[FONS_HASH_LUT_SIZE]; + int fallbacks[FONS_MAX_FALLBACKS]; + int nfallbacks; +}; +typedef struct FONSfont FONSfont; + +struct FONSstate +{ + int font; + int align; + float size; + unsigned int color; + float blur; + float spacing; +}; +typedef struct FONSstate FONSstate; + +struct FONSatlasNode { + short x, y, width; +}; +typedef struct FONSatlasNode FONSatlasNode; + +struct FONSatlas +{ + int width, height; + FONSatlasNode* nodes; + int nnodes; + int cnodes; +}; +typedef struct FONSatlas FONSatlas; + +struct FONScontext +{ + FONSparams params; + float itw,ith; + unsigned char* texData; + int dirtyRect[4]; + FONSfont** fonts; + FONSatlas* atlas; + int cfonts; + int nfonts; + float verts[FONS_VERTEX_COUNT*2]; + float tcoords[FONS_VERTEX_COUNT*2]; + unsigned int colors[FONS_VERTEX_COUNT]; + int nverts; + unsigned char* scratch; + int nscratch; + FONSstate states[FONS_MAX_STATES]; + int nstates; + void (*handleError)(void* uptr, int error, int val); + void* errorUptr; +}; + +#ifdef STB_TRUETYPE_IMPLEMENTATION + +static void* fons__tmpalloc(size_t size, void* up) +{ + unsigned char* ptr; + FONScontext* stash = (FONScontext*)up; + + // 16-byte align the returned pointer + size = (size + 0xf) & ~0xf; + + if (stash->nscratch+(int)size > FONS_SCRATCH_BUF_SIZE) { + if (stash->handleError) + stash->handleError(stash->errorUptr, FONS_SCRATCH_FULL, stash->nscratch+(int)size); + return NULL; + } + ptr = stash->scratch + stash->nscratch; + stash->nscratch += (int)size; + return ptr; +} + +static void fons__tmpfree(void* ptr, void* up) +{ + (void)ptr; + (void)up; + // empty +} + +#endif // STB_TRUETYPE_IMPLEMENTATION + +// Copyright (c) 2008-2010 Bjoern Hoehrmann +// See http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ for details. + +#define FONS_UTF8_ACCEPT 0 +#define FONS_UTF8_REJECT 12 + +static unsigned int fons__decutf8(unsigned int* state, unsigned int* codep, unsigned int byte) +{ + static const unsigned char utf8d[] = { + // The first part of the table maps bytes to character classes that + // to reduce the size of the transition table and create bitmasks. 
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, + 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, + + // The second part is a transition table that maps a combination + // of a state of the automaton and a character class to a state. + 0,12,24,36,60,96,84,12,12,12,48,72, 12,12,12,12,12,12,12,12,12,12,12,12, + 12, 0,12,12,12,12,12, 0,12, 0,12,12, 12,24,12,12,12,12,12,24,12,24,12,12, + 12,12,12,12,12,12,12,24,12,12,12,12, 12,24,12,12,12,12,12,12,12,24,12,12, + 12,12,12,12,12,12,12,36,12,36,12,12, 12,36,12,12,12,12,12,36,12,36,12,12, + 12,36,12,12,12,12,12,12,12,12,12,12, + }; + + unsigned int type = utf8d[byte]; + + *codep = (*state != FONS_UTF8_ACCEPT) ? + (byte & 0x3fu) | (*codep << 6) : + (0xff >> type) & (byte); + + *state = utf8d[256 + *state + type]; + return *state; +} + +// Atlas based on Skyline Bin Packer by Jukka Jylänki + +static void fons__deleteAtlas(FONSatlas* atlas) +{ + if (atlas == NULL) return; + if (atlas->nodes != NULL) free(atlas->nodes); + free(atlas); +} + +static FONSatlas* fons__allocAtlas(int w, int h, int nnodes) +{ + FONSatlas* atlas = NULL; + + // Allocate memory for the font stash. + atlas = (FONSatlas*)malloc(sizeof(FONSatlas)); + if (atlas == NULL) goto error; + memset(atlas, 0, sizeof(FONSatlas)); + + atlas->width = w; + atlas->height = h; + + // Allocate space for skyline nodes + atlas->nodes = (FONSatlasNode*)malloc(sizeof(FONSatlasNode) * nnodes); + if (atlas->nodes == NULL) goto error; + memset(atlas->nodes, 0, sizeof(FONSatlasNode) * nnodes); + atlas->nnodes = 0; + atlas->cnodes = nnodes; + + // Init root node. + atlas->nodes[0].x = 0; + atlas->nodes[0].y = 0; + atlas->nodes[0].width = (short)w; + atlas->nnodes++; + + return atlas; + +error: + if (atlas) fons__deleteAtlas(atlas); + return NULL; +} + +static int fons__atlasInsertNode(FONSatlas* atlas, int idx, int x, int y, int w) +{ + int i; + // Insert node + if (atlas->nnodes+1 > atlas->cnodes) { + atlas->cnodes = atlas->cnodes == 0 ? 8 : atlas->cnodes * 2; + atlas->nodes = (FONSatlasNode*)realloc(atlas->nodes, sizeof(FONSatlasNode) * atlas->cnodes); + if (atlas->nodes == NULL) + return 0; + } + for (i = atlas->nnodes; i > idx; i--) + atlas->nodes[i] = atlas->nodes[i-1]; + atlas->nodes[idx].x = (short)x; + atlas->nodes[idx].y = (short)y; + atlas->nodes[idx].width = (short)w; + atlas->nnodes++; + + return 1; +} + +static void fons__atlasRemoveNode(FONSatlas* atlas, int idx) +{ + int i; + if (atlas->nnodes == 0) return; + for (i = idx; i < atlas->nnodes-1; i++) + atlas->nodes[i] = atlas->nodes[i+1]; + atlas->nnodes--; +} + +static void fons__atlasExpand(FONSatlas* atlas, int w, int h) +{ + // Insert node for empty space + if (w > atlas->width) + fons__atlasInsertNode(atlas, atlas->nnodes, atlas->width, 0, w - atlas->width); + atlas->width = w; + atlas->height = h; +} + +static void fons__atlasReset(FONSatlas* atlas, int w, int h) +{ + atlas->width = w; + atlas->height = h; + atlas->nnodes = 0; + + // Init root node. 
+ atlas->nodes[0].x = 0; + atlas->nodes[0].y = 0; + atlas->nodes[0].width = (short)w; + atlas->nnodes++; +} + +static int fons__atlasAddSkylineLevel(FONSatlas* atlas, int idx, int x, int y, int w, int h) +{ + int i; + + // Insert new node + if (fons__atlasInsertNode(atlas, idx, x, y+h, w) == 0) + return 0; + + // Delete skyline segments that fall under the shadow of the new segment. + for (i = idx+1; i < atlas->nnodes; i++) { + if (atlas->nodes[i].x < atlas->nodes[i-1].x + atlas->nodes[i-1].width) { + int shrink = atlas->nodes[i-1].x + atlas->nodes[i-1].width - atlas->nodes[i].x; + atlas->nodes[i].x += (short)shrink; + atlas->nodes[i].width -= (short)shrink; + if (atlas->nodes[i].width <= 0) { + fons__atlasRemoveNode(atlas, i); + i--; + } else { + break; + } + } else { + break; + } + } + + // Merge same height skyline segments that are next to each other. + for (i = 0; i < atlas->nnodes-1; i++) { + if (atlas->nodes[i].y == atlas->nodes[i+1].y) { + atlas->nodes[i].width += atlas->nodes[i+1].width; + fons__atlasRemoveNode(atlas, i+1); + i--; + } + } + + return 1; +} + +static int fons__atlasRectFits(FONSatlas* atlas, int i, int w, int h) +{ + // Checks if there is enough space at the location of skyline span 'i', + // and return the max height of all skyline spans under that at that location, + // (think tetris block being dropped at that position). Or -1 if no space found. + int x = atlas->nodes[i].x; + int y = atlas->nodes[i].y; + int spaceLeft; + if (x + w > atlas->width) + return -1; + spaceLeft = w; + while (spaceLeft > 0) { + if (i == atlas->nnodes) return -1; + y = fons__maxi(y, atlas->nodes[i].y); + if (y + h > atlas->height) return -1; + spaceLeft -= atlas->nodes[i].width; + ++i; + } + return y; +} + +static int fons__atlasAddRect(FONSatlas* atlas, int rw, int rh, int* rx, int* ry) +{ + int besth = atlas->height, bestw = atlas->width, besti = -1; + int bestx = -1, besty = -1, i; + + // Bottom left fit heuristic. + for (i = 0; i < atlas->nnodes; i++) { + int y = fons__atlasRectFits(atlas, i, rw, rh); + if (y != -1) { + if (y + rh < besth || (y + rh == besth && atlas->nodes[i].width < bestw)) { + besti = i; + bestw = atlas->nodes[i].width; + besth = y + rh; + bestx = atlas->nodes[i].x; + besty = y; + } + } + } + + if (besti == -1) + return 0; + + // Perform the actual packing. + if (fons__atlasAddSkylineLevel(atlas, besti, bestx, besty, rw, rh) == 0) + return 0; + + *rx = bestx; + *ry = besty; + + return 1; +} + +static void fons__addWhiteRect(FONScontext* stash, int w, int h) +{ + int x, y, gx, gy; + unsigned char* dst; + if (fons__atlasAddRect(stash->atlas, w, h, &gx, &gy) == 0) + return; + + // Rasterize + dst = &stash->texData[gx + gy * stash->params.width]; + for (y = 0; y < h; y++) { + for (x = 0; x < w; x++) + dst[x] = 0xff; + dst += stash->params.width; + } + + stash->dirtyRect[0] = fons__mini(stash->dirtyRect[0], gx); + stash->dirtyRect[1] = fons__mini(stash->dirtyRect[1], gy); + stash->dirtyRect[2] = fons__maxi(stash->dirtyRect[2], gx+w); + stash->dirtyRect[3] = fons__maxi(stash->dirtyRect[3], gy+h); +} + +FONScontext* fonsCreateInternal(FONSparams* params) +{ + FONScontext* stash = NULL; + + // Allocate memory for the font stash. + stash = (FONScontext*)malloc(sizeof(FONScontext)); + if (stash == NULL) goto error; + memset(stash, 0, sizeof(FONScontext)); + + stash->params = *params; + + // Allocate scratch buffer. 
+ stash->scratch = (unsigned char*)malloc(FONS_SCRATCH_BUF_SIZE); + if (stash->scratch == NULL) goto error; + + // Initialize implementation library + if (!fons__tt_init(stash)) goto error; + + if (stash->params.renderCreate != NULL) { + if (stash->params.renderCreate(stash->params.userPtr, stash->params.width, stash->params.height) == 0) + goto error; + } + + stash->atlas = fons__allocAtlas(stash->params.width, stash->params.height, FONS_INIT_ATLAS_NODES); + if (stash->atlas == NULL) goto error; + + // Allocate space for fonts. + stash->fonts = (FONSfont**)malloc(sizeof(FONSfont*) * FONS_INIT_FONTS); + if (stash->fonts == NULL) goto error; + memset(stash->fonts, 0, sizeof(FONSfont*) * FONS_INIT_FONTS); + stash->cfonts = FONS_INIT_FONTS; + stash->nfonts = 0; + + // Create texture for the cache. + stash->itw = 1.0f/stash->params.width; + stash->ith = 1.0f/stash->params.height; + stash->texData = (unsigned char*)malloc(stash->params.width * stash->params.height); + if (stash->texData == NULL) goto error; + memset(stash->texData, 0, stash->params.width * stash->params.height); + + stash->dirtyRect[0] = stash->params.width; + stash->dirtyRect[1] = stash->params.height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + + // Add white rect at 0,0 for debug drawing. + fons__addWhiteRect(stash, 2,2); + + fonsPushState(stash); + fonsClearState(stash); + + return stash; + +error: + fonsDeleteInternal(stash); + return NULL; +} + +static FONSstate* fons__getState(FONScontext* stash) +{ + return &stash->states[stash->nstates-1]; +} + +int fonsAddFallbackFont(FONScontext* stash, int base, int fallback) +{ + FONSfont* baseFont = stash->fonts[base]; + if (baseFont->nfallbacks < FONS_MAX_FALLBACKS) { + baseFont->fallbacks[baseFont->nfallbacks++] = fallback; + return 1; + } + return 0; +} + +void fonsResetFallbackFont(FONScontext* stash, int base) +{ + int i; + + FONSfont* baseFont = stash->fonts[base]; + baseFont->nfallbacks = 0; + baseFont->nglyphs = 0; + for (i = 0; i < FONS_HASH_LUT_SIZE; i++) + baseFont->lut[i] = -1; +} + +void fonsSetSize(FONScontext* stash, float size) +{ + fons__getState(stash)->size = size; +} + +void fonsSetColor(FONScontext* stash, unsigned int color) +{ + fons__getState(stash)->color = color; +} + +void fonsSetSpacing(FONScontext* stash, float spacing) +{ + fons__getState(stash)->spacing = spacing; +} + +void fonsSetBlur(FONScontext* stash, float blur) +{ + fons__getState(stash)->blur = blur; +} + +void fonsSetAlign(FONScontext* stash, int align) +{ + fons__getState(stash)->align = align; +} + +void fonsSetFont(FONScontext* stash, int font) +{ + fons__getState(stash)->font = font; +} + +void fonsPushState(FONScontext* stash) +{ + if (stash->nstates >= FONS_MAX_STATES) { + if (stash->handleError) + stash->handleError(stash->errorUptr, FONS_STATES_OVERFLOW, 0); + return; + } + if (stash->nstates > 0) + memcpy(&stash->states[stash->nstates], &stash->states[stash->nstates-1], sizeof(FONSstate)); + stash->nstates++; +} + +void fonsPopState(FONScontext* stash) +{ + if (stash->nstates <= 1) { + if (stash->handleError) + stash->handleError(stash->errorUptr, FONS_STATES_UNDERFLOW, 0); + return; + } + stash->nstates--; +} + +void fonsClearState(FONScontext* stash) +{ + FONSstate* state = fons__getState(stash); + state->size = 12.0f; + state->color = 0xffffffff; + state->font = 0; + state->blur = 0; + state->spacing = 0; + state->align = FONS_ALIGN_LEFT | FONS_ALIGN_BASELINE; +} + +static void fons__freeFont(FONSfont* font) +{ + if (font == NULL) return; + if (font->glyphs) 
free(font->glyphs); + if (font->freeData && font->data) free(font->data); + free(font); +} + +static int fons__allocFont(FONScontext* stash) +{ + FONSfont* font = NULL; + if (stash->nfonts+1 > stash->cfonts) { + stash->cfonts = stash->cfonts == 0 ? 8 : stash->cfonts * 2; + stash->fonts = (FONSfont**)realloc(stash->fonts, sizeof(FONSfont*) * stash->cfonts); + if (stash->fonts == NULL) + return -1; + } + font = (FONSfont*)malloc(sizeof(FONSfont)); + if (font == NULL) goto error; + memset(font, 0, sizeof(FONSfont)); + + font->glyphs = (FONSglyph*)malloc(sizeof(FONSglyph) * FONS_INIT_GLYPHS); + if (font->glyphs == NULL) goto error; + font->cglyphs = FONS_INIT_GLYPHS; + font->nglyphs = 0; + + stash->fonts[stash->nfonts++] = font; + return stash->nfonts-1; + +error: + fons__freeFont(font); + + return FONS_INVALID; +} + +int fonsAddFont(FONScontext* stash, const char* name, const char* path, int fontIndex) +{ + FILE* fp = 0; + int dataSize = 0; + size_t readed; + unsigned char* data = NULL; + + // Read in the font data. + fp = fopen(path, "rb"); + if (fp == NULL) goto error; + fseek(fp,0,SEEK_END); + dataSize = (int)ftell(fp); + fseek(fp,0,SEEK_SET); + data = (unsigned char*)malloc(dataSize); + if (data == NULL) goto error; + readed = fread(data, 1, dataSize, fp); + fclose(fp); + fp = 0; + if (readed != (size_t)dataSize) goto error; + + return fonsAddFontMem(stash, name, data, dataSize, 1, fontIndex); + +error: + if (data) free(data); + if (fp) fclose(fp); + return FONS_INVALID; +} + +int fonsAddFontMem(FONScontext* stash, const char* name, unsigned char* data, int dataSize, int freeData, int fontIndex) +{ + int i, ascent, descent, fh, lineGap; + FONSfont* font; + + int idx = fons__allocFont(stash); + if (idx == FONS_INVALID) + return FONS_INVALID; + + font = stash->fonts[idx]; + + strncpy(font->name, name, sizeof(font->name)); + font->name[sizeof(font->name)-1] = '\0'; + + // Init hash lookup. + for (i = 0; i < FONS_HASH_LUT_SIZE; ++i) + font->lut[i] = -1; + + // Read in the font data. + font->dataSize = dataSize; + font->data = data; + font->freeData = (unsigned char)freeData; + + // Init font + stash->nscratch = 0; + if (!fons__tt_loadFont(stash, &font->font, data, dataSize, fontIndex)) goto error; + + // Store normalized line height. The real line height is got + // by multiplying the lineh by font size. + fons__tt_getFontVMetrics( &font->font, &ascent, &descent, &lineGap); + ascent += lineGap; + fh = ascent - descent; + font->ascender = (float)ascent / (float)fh; + font->descender = (float)descent / (float)fh; + font->lineh = font->ascender - font->descender; + + return idx; + +error: + fons__freeFont(font); + stash->nfonts--; + return FONS_INVALID; +} + +int fonsGetFontByName(FONScontext* s, const char* name) +{ + int i; + for (i = 0; i < s->nfonts; i++) { + if (strcmp(s->fonts[i]->name, name) == 0) + return i; + } + return FONS_INVALID; +} + + +static FONSglyph* fons__allocGlyph(FONSfont* font) +{ + if (font->nglyphs+1 > font->cglyphs) { + font->cglyphs = font->cglyphs == 0 ? 
8 : font->cglyphs * 2; + font->glyphs = (FONSglyph*)realloc(font->glyphs, sizeof(FONSglyph) * font->cglyphs); + if (font->glyphs == NULL) return NULL; + } + font->nglyphs++; + return &font->glyphs[font->nglyphs-1]; +} + + +// Based on Exponential blur, Jani Huhtanen, 2006 + +#define APREC 16 +#define ZPREC 7 + +static void fons__blurCols(unsigned char* dst, int w, int h, int dstStride, int alpha) +{ + int x, y; + for (y = 0; y < h; y++) { + int z = 0; // force zero border + for (x = 1; x < w; x++) { + z += (alpha * (((int)(dst[x]) << ZPREC) - z)) >> APREC; + dst[x] = (unsigned char)(z >> ZPREC); + } + dst[w-1] = 0; // force zero border + z = 0; + for (x = w-2; x >= 0; x--) { + z += (alpha * (((int)(dst[x]) << ZPREC) - z)) >> APREC; + dst[x] = (unsigned char)(z >> ZPREC); + } + dst[0] = 0; // force zero border + dst += dstStride; + } +} + +static void fons__blurRows(unsigned char* dst, int w, int h, int dstStride, int alpha) +{ + int x, y; + for (x = 0; x < w; x++) { + int z = 0; // force zero border + for (y = dstStride; y < h*dstStride; y += dstStride) { + z += (alpha * (((int)(dst[y]) << ZPREC) - z)) >> APREC; + dst[y] = (unsigned char)(z >> ZPREC); + } + dst[(h-1)*dstStride] = 0; // force zero border + z = 0; + for (y = (h-2)*dstStride; y >= 0; y -= dstStride) { + z += (alpha * (((int)(dst[y]) << ZPREC) - z)) >> APREC; + dst[y] = (unsigned char)(z >> ZPREC); + } + dst[0] = 0; // force zero border + dst++; + } +} + + +static void fons__blur(FONScontext* stash, unsigned char* dst, int w, int h, int dstStride, int blur) +{ + int alpha; + float sigma; + (void)stash; + + if (blur < 1) + return; + // Calculate the alpha such that 90% of the kernel is within the radius. (Kernel extends to infinity) + sigma = (float)blur * 0.57735f; // 1 / sqrt(3) + alpha = (int)((1< 20) iblur = 20; + pad = iblur+2; + + // Reset allocator. + stash->nscratch = 0; + + // Find code point and size. + h = fons__hashint(codepoint) & (FONS_HASH_LUT_SIZE-1); + i = font->lut[h]; + while (i != -1) { + if (font->glyphs[i].codepoint == codepoint && font->glyphs[i].size == isize && font->glyphs[i].blur == iblur) { + glyph = &font->glyphs[i]; + if (bitmapOption == FONS_GLYPH_BITMAP_OPTIONAL || (glyph->x0 >= 0 && glyph->y0 >= 0)) { + return glyph; + } + // At this point, glyph exists but the bitmap data is not yet created. + break; + } + i = font->glyphs[i].next; + } + + // Create a new glyph or rasterize bitmap data for a cached glyph. + g = fons__tt_getGlyphIndex(&font->font, codepoint); + // Try to find the glyph in fallback fonts. + if (g == 0) { + for (i = 0; i < font->nfallbacks; ++i) { + FONSfont* fallbackFont = stash->fonts[font->fallbacks[i]]; + int fallbackIndex = fons__tt_getGlyphIndex(&fallbackFont->font, codepoint); + if (fallbackIndex != 0) { + g = fallbackIndex; + renderFont = fallbackFont; + break; + } + } + // It is possible that we did not find a fallback glyph. + // In that case the glyph index 'g' is 0, and we'll proceed below and cache empty glyph. + } + scale = fons__tt_getPixelHeightScale(&renderFont->font, size); + fons__tt_buildGlyphBitmap(&renderFont->font, g, size, scale, &advance, &lsb, &x0, &y0, &x1, &y1); + gw = x1-x0 + pad*2; + gh = y1-y0 + pad*2; + + // Determines the spot to draw glyph in the atlas. + if (bitmapOption == FONS_GLYPH_BITMAP_REQUIRED) { + // Find free spot for the rect in the atlas + added = fons__atlasAddRect(stash->atlas, gw, gh, &gx, &gy); + if (added == 0 && stash->handleError != NULL) { + // Atlas is full, let the user to resize the atlas (or not), and try again. 
+ stash->handleError(stash->errorUptr, FONS_ATLAS_FULL, 0); + added = fons__atlasAddRect(stash->atlas, gw, gh, &gx, &gy); + } + if (added == 0) return NULL; + } else { + // Negative coordinate indicates there is no bitmap data created. + gx = -1; + gy = -1; + } + + // Init glyph. + if (glyph == NULL) { + glyph = fons__allocGlyph(font); + glyph->codepoint = codepoint; + glyph->size = isize; + glyph->blur = iblur; + glyph->next = 0; + + // Insert char to hash lookup. + glyph->next = font->lut[h]; + font->lut[h] = font->nglyphs-1; + } + glyph->index = g; + glyph->x0 = (short)gx; + glyph->y0 = (short)gy; + glyph->x1 = (short)(glyph->x0+gw); + glyph->y1 = (short)(glyph->y0+gh); + glyph->xadv = (short)(scale * advance * 10.0f); + glyph->xoff = (short)(x0 - pad); + glyph->yoff = (short)(y0 - pad); + + if (bitmapOption == FONS_GLYPH_BITMAP_OPTIONAL) { + return glyph; + } + + // Rasterize + dst = &stash->texData[(glyph->x0+pad) + (glyph->y0+pad) * stash->params.width]; + fons__tt_renderGlyphBitmap(&renderFont->font, dst, gw-pad*2,gh-pad*2, stash->params.width, scale, scale, g); + + // Make sure there is one pixel empty border. + dst = &stash->texData[glyph->x0 + glyph->y0 * stash->params.width]; + for (y = 0; y < gh; y++) { + dst[y*stash->params.width] = 0; + dst[gw-1 + y*stash->params.width] = 0; + } + for (x = 0; x < gw; x++) { + dst[x] = 0; + dst[x + (gh-1)*stash->params.width] = 0; + } + + // Debug code to color the glyph background +/* unsigned char* fdst = &stash->texData[glyph->x0 + glyph->y0 * stash->params.width]; + for (y = 0; y < gh; y++) { + for (x = 0; x < gw; x++) { + int a = (int)fdst[x+y*stash->params.width] + 20; + if (a > 255) a = 255; + fdst[x+y*stash->params.width] = a; + } + }*/ + + // Blur + if (iblur > 0) { + stash->nscratch = 0; + bdst = &stash->texData[glyph->x0 + glyph->y0 * stash->params.width]; + fons__blur(stash, bdst, gw, gh, stash->params.width, iblur); + } + + stash->dirtyRect[0] = fons__mini(stash->dirtyRect[0], glyph->x0); + stash->dirtyRect[1] = fons__mini(stash->dirtyRect[1], glyph->y0); + stash->dirtyRect[2] = fons__maxi(stash->dirtyRect[2], glyph->x1); + stash->dirtyRect[3] = fons__maxi(stash->dirtyRect[3], glyph->y1); + + return glyph; +} + +static void fons__getQuad(FONScontext* stash, FONSfont* font, + int prevGlyphIndex, FONSglyph* glyph, + float scale, float spacing, float* x, float* y, FONSquad* q) +{ + float rx,ry,xoff,yoff,x0,y0,x1,y1; + + if (prevGlyphIndex != -1) { + float adv = fons__tt_getGlyphKernAdvance(&font->font, prevGlyphIndex, glyph->index) * scale; + *x += (int)(adv + spacing + 0.5f); + } + + // Each glyph has 2px border to allow good interpolation, + // one pixel to prevent leaking, and one to allow good interpolation for rendering. + // Inset the texture region by one pixel for correct interpolation. 
+ xoff = (short)(glyph->xoff+1); + yoff = (short)(glyph->yoff+1); + x0 = (float)(glyph->x0+1); + y0 = (float)(glyph->y0+1); + x1 = (float)(glyph->x1-1); + y1 = (float)(glyph->y1-1); + + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + rx = floorf(*x + xoff); + ry = floorf(*y + yoff); + + q->x0 = rx; + q->y0 = ry; + q->x1 = rx + x1 - x0; + q->y1 = ry + y1 - y0; + + q->s0 = x0 * stash->itw; + q->t0 = y0 * stash->ith; + q->s1 = x1 * stash->itw; + q->t1 = y1 * stash->ith; + } else { + rx = floorf(*x + xoff); + ry = floorf(*y - yoff); + + q->x0 = rx; + q->y0 = ry; + q->x1 = rx + x1 - x0; + q->y1 = ry - y1 + y0; + + q->s0 = x0 * stash->itw; + q->t0 = y0 * stash->ith; + q->s1 = x1 * stash->itw; + q->t1 = y1 * stash->ith; + } + + *x += (int)(glyph->xadv / 10.0f + 0.5f); +} + +static void fons__flush(FONScontext* stash) +{ + // Flush texture + if (stash->dirtyRect[0] < stash->dirtyRect[2] && stash->dirtyRect[1] < stash->dirtyRect[3]) { + if (stash->params.renderUpdate != NULL) + stash->params.renderUpdate(stash->params.userPtr, stash->dirtyRect, stash->texData); + // Reset dirty rect + stash->dirtyRect[0] = stash->params.width; + stash->dirtyRect[1] = stash->params.height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + } + + // Flush triangles + if (stash->nverts > 0) { + if (stash->params.renderDraw != NULL) + stash->params.renderDraw(stash->params.userPtr, stash->verts, stash->tcoords, stash->colors, stash->nverts); + stash->nverts = 0; + } +} + +static __inline void fons__vertex(FONScontext* stash, float x, float y, float s, float t, unsigned int c) +{ + stash->verts[stash->nverts*2+0] = x; + stash->verts[stash->nverts*2+1] = y; + stash->tcoords[stash->nverts*2+0] = s; + stash->tcoords[stash->nverts*2+1] = t; + stash->colors[stash->nverts] = c; + stash->nverts++; +} + +static float fons__getVertAlign(FONScontext* stash, FONSfont* font, int align, short isize) +{ + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + if (align & FONS_ALIGN_TOP) { + return font->ascender * (float)isize/10.0f; + } else if (align & FONS_ALIGN_MIDDLE) { + return (font->ascender + font->descender) / 2.0f * (float)isize/10.0f; + } else if (align & FONS_ALIGN_BASELINE) { + return 0.0f; + } else if (align & FONS_ALIGN_BOTTOM) { + return font->descender * (float)isize/10.0f; + } + } else { + if (align & FONS_ALIGN_TOP) { + return -font->ascender * (float)isize/10.0f; + } else if (align & FONS_ALIGN_MIDDLE) { + return -(font->ascender + font->descender) / 2.0f * (float)isize/10.0f; + } else if (align & FONS_ALIGN_BASELINE) { + return 0.0f; + } else if (align & FONS_ALIGN_BOTTOM) { + return -font->descender * (float)isize/10.0f; + } + } + return 0.0; +} + +float fonsDrawText(FONScontext* stash, + float x, float y, + const char* str, const char* end) +{ + FONSstate* state = fons__getState(stash); + unsigned int codepoint; + unsigned int utf8state = 0; + FONSglyph* glyph = NULL; + FONSquad q; + int prevGlyphIndex = -1; + short isize = (short)(state->size*10.0f); + short iblur = (short)state->blur; + float scale; + FONSfont* font; + float width; + + if (stash == NULL) return x; + if (state->font < 0 || state->font >= stash->nfonts) return x; + font = stash->fonts[state->font]; + if (font->data == NULL) return x; + + scale = fons__tt_getPixelHeightScale(&font->font, (float)isize/10.0f); + + if (end == NULL) + end = str + strlen(str); + + // Align horizontally + if (state->align & FONS_ALIGN_LEFT) { + // empty + } else if (state->align & FONS_ALIGN_RIGHT) { + width = fonsTextBounds(stash, x,y, str, end, NULL); + x -= width; 
+ } else if (state->align & FONS_ALIGN_CENTER) { + width = fonsTextBounds(stash, x,y, str, end, NULL); + x -= width * 0.5f; + } + // Align vertically. + y += fons__getVertAlign(stash, font, state->align, isize); + + for (; str != end; ++str) { + if (fons__decutf8(&utf8state, &codepoint, *(const unsigned char*)str)) + continue; + glyph = fons__getGlyph(stash, font, codepoint, isize, iblur, FONS_GLYPH_BITMAP_REQUIRED); + if (glyph != NULL) { + fons__getQuad(stash, font, prevGlyphIndex, glyph, scale, state->spacing, &x, &y, &q); + + if (stash->nverts+6 > FONS_VERTEX_COUNT) + fons__flush(stash); + + fons__vertex(stash, q.x0, q.y0, q.s0, q.t0, state->color); + fons__vertex(stash, q.x1, q.y1, q.s1, q.t1, state->color); + fons__vertex(stash, q.x1, q.y0, q.s1, q.t0, state->color); + + fons__vertex(stash, q.x0, q.y0, q.s0, q.t0, state->color); + fons__vertex(stash, q.x0, q.y1, q.s0, q.t1, state->color); + fons__vertex(stash, q.x1, q.y1, q.s1, q.t1, state->color); + } + prevGlyphIndex = glyph != NULL ? glyph->index : -1; + } + fons__flush(stash); + + return x; +} + +int fonsTextIterInit(FONScontext* stash, FONStextIter* iter, + float x, float y, const char* str, const char* end, int bitmapOption) +{ + FONSstate* state = fons__getState(stash); + float width; + + memset(iter, 0, sizeof(*iter)); + + if (stash == NULL) return 0; + if (state->font < 0 || state->font >= stash->nfonts) return 0; + iter->font = stash->fonts[state->font]; + if (iter->font->data == NULL) return 0; + + iter->isize = (short)(state->size*10.0f); + iter->iblur = (short)state->blur; + iter->scale = fons__tt_getPixelHeightScale(&iter->font->font, (float)iter->isize/10.0f); + + // Align horizontally + if (state->align & FONS_ALIGN_LEFT) { + // empty + } else if (state->align & FONS_ALIGN_RIGHT) { + width = fonsTextBounds(stash, x,y, str, end, NULL); + x -= width; + } else if (state->align & FONS_ALIGN_CENTER) { + width = fonsTextBounds(stash, x,y, str, end, NULL); + x -= width * 0.5f; + } + // Align vertically. + y += fons__getVertAlign(stash, iter->font, state->align, iter->isize); + + if (end == NULL) + end = str + strlen(str); + + iter->x = iter->nextx = x; + iter->y = iter->nexty = y; + iter->spacing = state->spacing; + iter->str = str; + iter->next = str; + iter->end = end; + iter->codepoint = 0; + iter->prevGlyphIndex = -1; + iter->bitmapOption = bitmapOption; + + return 1; +} + +int fonsTextIterNext(FONScontext* stash, FONStextIter* iter, FONSquad* quad) +{ + FONSglyph* glyph = NULL; + const char* str = iter->next; + iter->str = iter->next; + + if (str == iter->end) + return 0; + + for (; str != iter->end; str++) { + if (fons__decutf8(&iter->utf8state, &iter->codepoint, *(const unsigned char*)str)) + continue; + str++; + // Get glyph and quad + iter->x = iter->nextx; + iter->y = iter->nexty; + glyph = fons__getGlyph(stash, iter->font, iter->codepoint, iter->isize, iter->iblur, iter->bitmapOption); + // If the iterator was initialized with FONS_GLYPH_BITMAP_OPTIONAL, then the UV coordinates of the quad will be invalid. + if (glyph != NULL) + fons__getQuad(stash, iter->font, iter->prevGlyphIndex, glyph, iter->scale, iter->spacing, &iter->nextx, &iter->nexty, quad); + iter->prevGlyphIndex = glyph != NULL ? glyph->index : -1; + break; + } + iter->next = str; + + return 1; +} + +void fonsDrawDebug(FONScontext* stash, float x, float y) +{ + int i; + int w = stash->params.width; + int h = stash->params.height; + float u = w == 0 ? 0 : (1.0f / w); + float v = h == 0 ? 
0 : (1.0f / h); + + if (stash->nverts+6+6 > FONS_VERTEX_COUNT) + fons__flush(stash); + + // Draw background + fons__vertex(stash, x+0, y+0, u, v, 0x0fffffff); + fons__vertex(stash, x+w, y+h, u, v, 0x0fffffff); + fons__vertex(stash, x+w, y+0, u, v, 0x0fffffff); + + fons__vertex(stash, x+0, y+0, u, v, 0x0fffffff); + fons__vertex(stash, x+0, y+h, u, v, 0x0fffffff); + fons__vertex(stash, x+w, y+h, u, v, 0x0fffffff); + + // Draw texture + fons__vertex(stash, x+0, y+0, 0, 0, 0xffffffff); + fons__vertex(stash, x+w, y+h, 1, 1, 0xffffffff); + fons__vertex(stash, x+w, y+0, 1, 0, 0xffffffff); + + fons__vertex(stash, x+0, y+0, 0, 0, 0xffffffff); + fons__vertex(stash, x+0, y+h, 0, 1, 0xffffffff); + fons__vertex(stash, x+w, y+h, 1, 1, 0xffffffff); + + // Drawbug draw atlas + for (i = 0; i < stash->atlas->nnodes; i++) { + FONSatlasNode* n = &stash->atlas->nodes[i]; + + if (stash->nverts+6 > FONS_VERTEX_COUNT) + fons__flush(stash); + + fons__vertex(stash, x+n->x+0, y+n->y+0, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+n->width, y+n->y+1, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+n->width, y+n->y+0, u, v, 0xc00000ff); + + fons__vertex(stash, x+n->x+0, y+n->y+0, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+0, y+n->y+1, u, v, 0xc00000ff); + fons__vertex(stash, x+n->x+n->width, y+n->y+1, u, v, 0xc00000ff); + } + + fons__flush(stash); +} + +float fonsTextBounds(FONScontext* stash, + float x, float y, + const char* str, const char* end, + float* bounds) +{ + FONSstate* state = fons__getState(stash); + unsigned int codepoint; + unsigned int utf8state = 0; + FONSquad q; + FONSglyph* glyph = NULL; + int prevGlyphIndex = -1; + short isize = (short)(state->size*10.0f); + short iblur = (short)state->blur; + float scale; + FONSfont* font; + float startx, advance; + float minx, miny, maxx, maxy; + + if (stash == NULL) return 0; + if (state->font < 0 || state->font >= stash->nfonts) return 0; + font = stash->fonts[state->font]; + if (font->data == NULL) return 0; + + scale = fons__tt_getPixelHeightScale(&font->font, (float)isize/10.0f); + + // Align vertically. + y += fons__getVertAlign(stash, font, state->align, isize); + + minx = maxx = x; + miny = maxy = y; + startx = x; + + if (end == NULL) + end = str + strlen(str); + + for (; str != end; ++str) { + if (fons__decutf8(&utf8state, &codepoint, *(const unsigned char*)str)) + continue; + glyph = fons__getGlyph(stash, font, codepoint, isize, iblur, FONS_GLYPH_BITMAP_OPTIONAL); + if (glyph != NULL) { + fons__getQuad(stash, font, prevGlyphIndex, glyph, scale, state->spacing, &x, &y, &q); + if (q.x0 < minx) minx = q.x0; + if (q.x1 > maxx) maxx = q.x1; + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + if (q.y0 < miny) miny = q.y0; + if (q.y1 > maxy) maxy = q.y1; + } else { + if (q.y1 < miny) miny = q.y1; + if (q.y0 > maxy) maxy = q.y0; + } + } + prevGlyphIndex = glyph != NULL ? 
glyph->index : -1; + } + + advance = x - startx; + + // Align horizontally + if (state->align & FONS_ALIGN_LEFT) { + // empty + } else if (state->align & FONS_ALIGN_RIGHT) { + minx -= advance; + maxx -= advance; + } else if (state->align & FONS_ALIGN_CENTER) { + minx -= advance * 0.5f; + maxx -= advance * 0.5f; + } + + if (bounds) { + bounds[0] = minx; + bounds[1] = miny; + bounds[2] = maxx; + bounds[3] = maxy; + } + + return advance; +} + +void fonsVertMetrics(FONScontext* stash, + float* ascender, float* descender, float* lineh) +{ + FONSfont* font; + FONSstate* state = fons__getState(stash); + short isize; + + if (stash == NULL) return; + if (state->font < 0 || state->font >= stash->nfonts) return; + font = stash->fonts[state->font]; + isize = (short)(state->size*10.0f); + if (font->data == NULL) return; + + if (ascender) + *ascender = font->ascender*isize/10.0f; + if (descender) + *descender = font->descender*isize/10.0f; + if (lineh) + *lineh = font->lineh*isize/10.0f; +} + +void fonsLineBounds(FONScontext* stash, float y, float* miny, float* maxy) +{ + FONSfont* font; + FONSstate* state = fons__getState(stash); + short isize; + + if (stash == NULL) return; + if (state->font < 0 || state->font >= stash->nfonts) return; + font = stash->fonts[state->font]; + isize = (short)(state->size*10.0f); + if (font->data == NULL) return; + + y += fons__getVertAlign(stash, font, state->align, isize); + + if (stash->params.flags & FONS_ZERO_TOPLEFT) { + *miny = y - font->ascender * (float)isize/10.0f; + *maxy = *miny + font->lineh*isize/10.0f; + } else { + *maxy = y + font->descender * (float)isize/10.0f; + *miny = *maxy - font->lineh*isize/10.0f; + } +} + +const unsigned char* fonsGetTextureData(FONScontext* stash, int* width, int* height) +{ + if (width != NULL) + *width = stash->params.width; + if (height != NULL) + *height = stash->params.height; + return stash->texData; +} + +int fonsValidateTexture(FONScontext* stash, int* dirty) +{ + if (stash->dirtyRect[0] < stash->dirtyRect[2] && stash->dirtyRect[1] < stash->dirtyRect[3]) { + dirty[0] = stash->dirtyRect[0]; + dirty[1] = stash->dirtyRect[1]; + dirty[2] = stash->dirtyRect[2]; + dirty[3] = stash->dirtyRect[3]; + // Reset dirty rect + stash->dirtyRect[0] = stash->params.width; + stash->dirtyRect[1] = stash->params.height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + return 1; + } + return 0; +} + +void fonsDeleteInternal(FONScontext* stash) +{ + int i; + if (stash == NULL) return; + + if (stash->params.renderDelete) + stash->params.renderDelete(stash->params.userPtr); + + for (i = 0; i < stash->nfonts; ++i) + fons__freeFont(stash->fonts[i]); + + if (stash->atlas) fons__deleteAtlas(stash->atlas); + if (stash->fonts) free(stash->fonts); + if (stash->texData) free(stash->texData); + if (stash->scratch) free(stash->scratch); + free(stash); + fons__tt_done(stash); +} + +void fonsSetErrorCallback(FONScontext* stash, void (*callback)(void* uptr, int error, int val), void* uptr) +{ + if (stash == NULL) return; + stash->handleError = callback; + stash->errorUptr = uptr; +} + +void fonsGetAtlasSize(FONScontext* stash, int* width, int* height) +{ + if (stash == NULL) return; + *width = stash->params.width; + *height = stash->params.height; +} + +int fonsExpandAtlas(FONScontext* stash, int width, int height) +{ + int i, maxy = 0; + unsigned char* data = NULL; + if (stash == NULL) return 0; + + width = fons__maxi(width, stash->params.width); + height = fons__maxi(height, stash->params.height); + + if (width == stash->params.width && height == 
stash->params.height) + return 1; + + // Flush pending glyphs. + fons__flush(stash); + + // Create new texture + if (stash->params.renderResize != NULL) { + if (stash->params.renderResize(stash->params.userPtr, width, height) == 0) + return 0; + } + // Copy old texture data over. + data = (unsigned char*)malloc(width * height); + if (data == NULL) + return 0; + for (i = 0; i < stash->params.height; i++) { + unsigned char* dst = &data[i*width]; + unsigned char* src = &stash->texData[i*stash->params.width]; + memcpy(dst, src, stash->params.width); + if (width > stash->params.width) + memset(dst+stash->params.width, 0, width - stash->params.width); + } + if (height > stash->params.height) + memset(&data[stash->params.height * width], 0, (height - stash->params.height) * width); + + free(stash->texData); + stash->texData = data; + + // Increase atlas size + fons__atlasExpand(stash->atlas, width, height); + + // Add existing data as dirty. + for (i = 0; i < stash->atlas->nnodes; i++) + maxy = fons__maxi(maxy, stash->atlas->nodes[i].y); + stash->dirtyRect[0] = 0; + stash->dirtyRect[1] = 0; + stash->dirtyRect[2] = stash->params.width; + stash->dirtyRect[3] = maxy; + + stash->params.width = width; + stash->params.height = height; + stash->itw = 1.0f/stash->params.width; + stash->ith = 1.0f/stash->params.height; + + return 1; +} + +int fonsResetAtlas(FONScontext* stash, int width, int height) +{ + int i, j; + if (stash == NULL) return 0; + + // Flush pending glyphs. + fons__flush(stash); + + // Create new texture + if (stash->params.renderResize != NULL) { + if (stash->params.renderResize(stash->params.userPtr, width, height) == 0) + return 0; + } + + // Reset atlas + fons__atlasReset(stash->atlas, width, height); + + // Clear texture data. + stash->texData = (unsigned char*)realloc(stash->texData, width * height); + if (stash->texData == NULL) return 0; + memset(stash->texData, 0, width * height); + + // Reset dirty rect + stash->dirtyRect[0] = width; + stash->dirtyRect[1] = height; + stash->dirtyRect[2] = 0; + stash->dirtyRect[3] = 0; + + // Reset cached glyphs + for (i = 0; i < stash->nfonts; i++) { + FONSfont* font = stash->fonts[i]; + font->nglyphs = 0; + for (j = 0; j < FONS_HASH_LUT_SIZE; j++) + font->lut[j] = -1; + } + + stash->params.width = width; + stash->params.height = height; + stash->itw = 1.0f/stash->params.width; + stash->ith = 1.0f/stash->params.height; + + // Add white rect at 0,0 for debug drawing. 
+ fons__addWhiteRect(stash, 2,2); + + return 1; +} + + +#endif diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CApplication.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CApplication.h new file mode 100644 index 000000000..2d804507b --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/CApplication.h @@ -0,0 +1,38 @@ +/* +** Sample Framework for deko3d Applications +** CApplication.h: Wrapper class containing common application boilerplate +*/ +#pragma once +#include "common.h" + +class CApplication +{ +protected: + virtual void onFocusState(AppletFocusState) { } + virtual void onOperationMode(AppletOperationMode) { } + virtual bool onFrame(u64) { return true; } + +public: + CApplication(); + ~CApplication(); + + void run(); + + static constexpr void chooseFramebufferSize(uint32_t& width, uint32_t& height, AppletOperationMode mode); +}; + +constexpr void CApplication::chooseFramebufferSize(uint32_t& width, uint32_t& height, AppletOperationMode mode) +{ + switch (mode) + { + default: + case AppletOperationMode_Handheld: + width = 1280; + height = 720; + break; + case AppletOperationMode_Docked: + width = 1920; + height = 1080; + break; + } +} diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CCmdMemRing.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CCmdMemRing.h new file mode 100644 index 000000000..3a8e1a00b --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/CCmdMemRing.h @@ -0,0 +1,57 @@ +/* +** Sample Framework for deko3d Applications +** CCmdMemRing.h: Memory provider class for dynamic command buffers +*/ +#pragma once +#include "common.h" +#include "CMemPool.h" + +template +class CCmdMemRing +{ + static_assert(NumSlices > 0, "Need a non-zero number of slices..."); + CMemPool::Handle m_mem; + unsigned m_curSlice; + dk::Fence m_fences[NumSlices]; +public: + CCmdMemRing() : m_mem{}, m_curSlice{}, m_fences{} { } + ~CCmdMemRing() + { + m_mem.destroy(); + } + + bool allocate(CMemPool& pool, uint32_t sliceSize) + { + sliceSize = (sliceSize + DK_CMDMEM_ALIGNMENT - 1) &~ (DK_CMDMEM_ALIGNMENT - 1); + m_mem = pool.allocate(NumSlices*sliceSize); + return m_mem; + } + + void begin(dk::CmdBuf cmdbuf) + { + // Clear/reset the command buffer, which also destroys all command list handles + // (but remember: it does *not* in fact destroy the command data) + cmdbuf.clear(); + + // Wait for the current slice of memory to be available, and feed it to the command buffer + uint32_t sliceSize = m_mem.getSize() / NumSlices; + m_fences[m_curSlice].wait(); + + // Feed the memory to the command buffer + cmdbuf.addMemory(m_mem.getMemBlock(), m_mem.getOffset() + m_curSlice * sliceSize, sliceSize); + } + + DkCmdList end(dk::CmdBuf cmdbuf) + { + // Signal the fence corresponding to the current slice; so that in the future when we want + // to use it again, we can wait for the completion of the commands we've just submitted + // (and as such we don't overwrite in-flight command data with new one) + cmdbuf.signalFence(m_fences[m_curSlice]); + + // Advance the current slice counter; wrapping around when we reach the end + m_curSlice = (m_curSlice + 1) % NumSlices; + + // Finish off the command list, returning it to the caller + return cmdbuf.finishList(); + } +}; diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CDescriptorSet.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CDescriptorSet.h new file mode 100644 index 000000000..6e4647919 --- /dev/null +++ 
b/troposphere/daybreak/nanovg/include/nanovg/framework/CDescriptorSet.h @@ -0,0 +1,71 @@ +/* +** Sample Framework for deko3d Applications +** CDescriptorSet.h: Image/Sampler descriptor set class +*/ +#pragma once +#include "common.h" +#include "CMemPool.h" + +template +class CDescriptorSet +{ + static_assert(NumDescriptors > 0, "Need a non-zero number of descriptors..."); + static_assert(sizeof(DkImageDescriptor) == sizeof(DkSamplerDescriptor), "shouldn't happen"); + static_assert(DK_IMAGE_DESCRIPTOR_ALIGNMENT == DK_SAMPLER_DESCRIPTOR_ALIGNMENT, "shouldn't happen"); + static constexpr size_t DescriptorSize = sizeof(DkImageDescriptor); + static constexpr size_t DescriptorAlign = DK_IMAGE_DESCRIPTOR_ALIGNMENT; + + CMemPool::Handle m_mem; +public: + CDescriptorSet() : m_mem{} { } + ~CDescriptorSet() + { + m_mem.destroy(); + } + + bool allocate(CMemPool& pool) + { + m_mem = pool.allocate(NumDescriptors*DescriptorSize, DescriptorAlign); + return m_mem; + } + + void bindForImages(dk::CmdBuf cmdbuf) + { + cmdbuf.bindImageDescriptorSet(m_mem.getGpuAddr(), NumDescriptors); + } + + void bindForSamplers(dk::CmdBuf cmdbuf) + { + cmdbuf.bindSamplerDescriptorSet(m_mem.getGpuAddr(), NumDescriptors); + } + + template + void update(dk::CmdBuf cmdbuf, uint32_t id, T const& descriptor) + { + static_assert(sizeof(T) == DescriptorSize); + cmdbuf.pushData(m_mem.getGpuAddr() + id*DescriptorSize, &descriptor, DescriptorSize); + } + + template + void update(dk::CmdBuf cmdbuf, uint32_t id, std::array const& descriptors) + { + static_assert(sizeof(T) == DescriptorSize); + cmdbuf.pushData(m_mem.getGpuAddr() + id*DescriptorSize, descriptors.data(), descriptors.size()*DescriptorSize); + } + +#ifdef DK_HPP_SUPPORT_VECTOR + template > + void update(dk::CmdBuf cmdbuf, uint32_t id, std::vector const& descriptors) + { + static_assert(sizeof(T) == DescriptorSize); + cmdbuf.pushData(m_mem.getGpuAddr() + id*DescriptorSize, descriptors.data(), descriptors.size()*DescriptorSize); + } +#endif + + template + void update(dk::CmdBuf cmdbuf, uint32_t id, std::initializer_list const& descriptors) + { + static_assert(sizeof(T) == DescriptorSize); + cmdbuf.pushData(m_mem.getGpuAddr() + id*DescriptorSize, descriptors.data(), descriptors.size()*DescriptorSize); + } +}; diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CExternalImage.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CExternalImage.h new file mode 100644 index 000000000..230e2e91f --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/CExternalImage.h @@ -0,0 +1,37 @@ +/* +** Sample Framework for deko3d Applications +** CExternalImage.h: Utility class for loading images from the filesystem +*/ +#pragma once +#include "common.h" +#include "CMemPool.h" + +class CExternalImage +{ + dk::Image m_image; + dk::ImageDescriptor m_descriptor; + CMemPool::Handle m_mem; +public: + CExternalImage() : m_image{}, m_descriptor{}, m_mem{} { } + ~CExternalImage() + { + m_mem.destroy(); + } + + constexpr operator bool() const + { + return m_mem; + } + + constexpr dk::Image& get() + { + return m_image; + } + + constexpr dk::ImageDescriptor const& getDescriptor() const + { + return m_descriptor; + } + + bool load(CMemPool& imagePool, CMemPool& scratchPool, dk::Device device, dk::Queue transferQueue, const char* path, uint32_t width, uint32_t height, DkImageFormat format, uint32_t flags = 0); +}; diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CIntrusiveList.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CIntrusiveList.h new 
file mode 100644 index 000000000..73eb5c8b5 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/CIntrusiveList.h @@ -0,0 +1,119 @@ +/* +** Sample Framework for deko3d Applications +** CIntrusiveList.h: Intrusive doubly-linked list helper class +*/ +#pragma once +#include "common.h" + +template +struct CIntrusiveListNode +{ + T *m_next, *m_prev; + + constexpr CIntrusiveListNode() : m_next{}, m_prev{} { } + constexpr operator bool() const { return m_next || m_prev; } +}; + +template T::* node_ptr> +class CIntrusiveList +{ + T *m_first, *m_last; + +public: + constexpr CIntrusiveList() : m_first{}, m_last{} { } + + constexpr T* first() const { return m_first; } + constexpr T* last() const { return m_last; } + constexpr bool empty() const { return !m_first; } + constexpr void clear() { m_first = m_last = nullptr; } + + constexpr bool isLinked(T* obj) const { return obj->*node_ptr || m_first == obj; } + constexpr T* prev(T* obj) const { return (obj->*node_ptr).m_prev; } + constexpr T* next(T* obj) const { return (obj->*node_ptr).m_next; } + + void add(T* obj) + { + return addBefore(nullptr, obj); + } + + void addBefore(T* pos, T* obj) + { + auto& node = obj->*node_ptr; + node.m_next = pos; + node.m_prev = pos ? (pos->*node_ptr).m_prev : m_last; + + if (pos) + (pos->*node_ptr).m_prev = obj; + else + m_last = obj; + + if (node.m_prev) + (node.m_prev->*node_ptr).m_next = obj; + else + m_first = obj; + } + + void addAfter(T* pos, T* obj) + { + auto& node = obj->*node_ptr; + node.m_next = pos ? (pos->*node_ptr).m_next : m_first; + node.m_prev = pos; + + if (pos) + (pos->*node_ptr).m_next = obj; + else + m_first = obj; + + if (node.m_next) + (node.m_next->*node_ptr).m_prev = obj; + else + m_last = obj; + } + + T* pop() + { + T* ret = m_first; + if (ret) + { + m_first = (ret->*node_ptr).m_next; + if (m_first) + (m_first->*node_ptr).m_prev = nullptr; + else + m_last = nullptr; + } + return ret; + } + + void remove(T* obj) + { + auto& node = obj->*node_ptr; + if (node.m_prev) + { + (node.m_prev->*node_ptr).m_next = node.m_next; + if (node.m_next) + (node.m_next->*node_ptr).m_prev = node.m_prev; + else + m_last = node.m_prev; + } else + { + m_first = node.m_next; + if (m_first) + (m_first->*node_ptr).m_prev = nullptr; + else + m_last = nullptr; + } + + node.m_next = node.m_prev = 0; + } + + template + void iterate(L lambda) const + { + T* next = nullptr; + for (T* cur = m_first; cur; cur = next) + { + next = (cur->*node_ptr).m_next; + lambda(cur); + } + } +}; diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CIntrusiveTree.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CIntrusiveTree.h new file mode 100644 index 000000000..9796ee6ba --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/CIntrusiveTree.h @@ -0,0 +1,250 @@ +/* +** Sample Framework for deko3d Applications +** CIntrusiveTree.h: Intrusive red-black tree helper class +*/ +#pragma once +#include "common.h" + +#include + +struct CIntrusiveTreeNode +{ + enum Color + { + Red, + Black, + }; + + enum Leaf + { + Left, + Right, + }; + +private: + uintptr_t m_parent_color; + CIntrusiveTreeNode* m_children[2]; + +public: + constexpr CIntrusiveTreeNode() : m_parent_color{}, m_children{} { } + + constexpr CIntrusiveTreeNode* getParent() const + { + return reinterpret_cast(m_parent_color &~ 1); + } + + void setParent(CIntrusiveTreeNode* parent) + { + m_parent_color = (m_parent_color & 1) | reinterpret_cast(parent); + } + + constexpr Color getColor() const + { + return 
static_cast(m_parent_color & 1); + } + + void setColor(Color color) + { + m_parent_color = (m_parent_color &~ 1) | static_cast(color); + } + + constexpr CIntrusiveTreeNode*& child(Leaf leaf) + { + return m_children[leaf]; + } + + constexpr CIntrusiveTreeNode* const& child(Leaf leaf) const + { + return m_children[leaf]; + } + + //-------------------------------------- + + constexpr bool isRed() const { return getColor() == Red; } + constexpr bool isBlack() const { return getColor() == Black; } + void setRed() { setColor(Red); } + void setBlack() { setColor(Black); } + + constexpr CIntrusiveTreeNode*& left() { return child(Left); } + constexpr CIntrusiveTreeNode*& right() { return child(Right); } + constexpr CIntrusiveTreeNode* const& left() const { return child(Left); } + constexpr CIntrusiveTreeNode* const& right() const { return child(Right); } +}; + +NX_CONSTEXPR CIntrusiveTreeNode::Leaf operator!(CIntrusiveTreeNode::Leaf val) noexcept +{ + return static_cast(!static_cast(val)); +} + +class CIntrusiveTreeBase +{ + using N = CIntrusiveTreeNode; + + void rotate(N* node, N::Leaf leaf); + void recolor(N* parent, N* node); +protected: + N* m_root; + + constexpr CIntrusiveTreeBase() : m_root{} { } + + N* walk(N* node, N::Leaf leaf) const; + void insert(N* node, N* parent); + void remove(N* node); + + N* minmax(N::Leaf leaf) const + { + N* p = m_root; + if (!p) + return nullptr; + while (p->child(leaf)) + p = p->child(leaf); + return p; + } + + template + N*& navigate(N*& node, N*& parent, N::Leaf leafOnEqual, H helm) const + { + node = nullptr; + parent = nullptr; + + N** point = const_cast(&m_root); + while (*point) + { + int direction = helm(*point); + parent = *point; + if (direction < 0) + point = &(*point)->left(); + else if (direction > 0) + point = &(*point)->right(); + else + { + node = *point; + point = &(*point)->child(leafOnEqual); + } + } + return *point; + } +}; + +template +constexpr ClassT* parent_obj(MemberT* member, MemberT ClassT::* ptr) +{ + union whatever + { + MemberT ClassT::* ptr; + intptr_t offset; + }; + // This is technically UB, but basically every compiler worth using admits it as an extension + return (ClassT*)((intptr_t)member - whatever{ptr}.offset); +} + +template < + typename T, + CIntrusiveTreeNode T::* node_ptr, + typename Comparator = std::less<> +> +class CIntrusiveTree final : protected CIntrusiveTreeBase +{ + using N = CIntrusiveTreeNode; + + static constexpr T* toType(N* m) + { + return m ? parent_obj(m, node_ptr) : nullptr; + } + + static constexpr N* toNode(T* m) + { + return m ? &(m->*node_ptr) : nullptr; + } + + template + static int compare(A const& a, B const& b) + { + Comparator comp; + if (comp(a, b)) + return -1; + if (comp(b, a)) + return 1; + return 0; + } + +public: + constexpr CIntrusiveTree() : CIntrusiveTreeBase{} { } + + T* first() const { return toType(minmax(N::Left)); } + T* last() const { return toType(minmax(N::Right)); } + bool empty() const { return m_root != nullptr; } + void clear() { m_root = nullptr; } + + T* prev(T* node) const { return toType(walk(toNode(node), N::Left)); } + T* next(T* node) const { return toType(walk(toNode(node), N::Right)); } + + enum SearchMode + { + Exact = 0, + LowerBound = 1, + UpperBound = 2, + }; + + template + T* search(SearchMode mode, Lambda lambda) const + { + N *node, *parent; + N*& point = navigate(node, parent, + mode != UpperBound ? 
N::Left : N::Right, + [&lambda](N* curnode) { return lambda(toType(curnode)); }); + + switch (mode) + { + default: + case Exact: + break; + case LowerBound: + if (!node && parent) + { + if (&parent->left() == &point) + node = parent; + else + node = walk(parent, N::Right); + } + break; + case UpperBound: + if (node) + node = walk(node, N::Right); + else if (parent) + { + if (&parent->right() == &point) + node = walk(parent, N::Right); + else + node = parent; + } + break; + } + return toType(node); + } + + template + T* find(K const& key, SearchMode mode = Exact) const + { + return search(mode, [&key](T* obj) { return compare(key, *obj); }); + } + + T* insert(T* obj, bool allow_dupes = false) + { + N *node, *parent; + N*& point = navigate(node, parent, N::Right, + [obj](N* curnode) { return compare(*obj, *toType(curnode)); }); + + if (node && !allow_dupes) + return toType(node); + + point = toNode(obj); + CIntrusiveTreeBase::insert(point, parent); + return obj; + } + + void remove(T* obj) + { + CIntrusiveTreeBase::remove(toNode(obj)); + } +}; diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CMemPool.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CMemPool.h new file mode 100644 index 000000000..978755cd2 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/CMemPool.h @@ -0,0 +1,120 @@ +/* +** Sample Framework for deko3d Applications +** CMemPool.h: Pooled dynamic memory allocation manager class +*/ +#pragma once +#include "common.h" +#include "CIntrusiveList.h" +#include "CIntrusiveTree.h" + +class CMemPool +{ + dk::Device m_dev; + uint32_t m_flags; + uint32_t m_blockSize; + + struct Block + { + CIntrusiveListNode m_node; + dk::MemBlock m_obj; + void* m_cpuAddr; + DkGpuAddr m_gpuAddr; + + constexpr void* cpuOffset(uint32_t offset) const + { + return m_cpuAddr ? ((u8*)m_cpuAddr + offset) : nullptr; + } + + constexpr DkGpuAddr gpuOffset(uint32_t offset) const + { + return m_gpuAddr != DK_GPU_ADDR_INVALID ? 
(m_gpuAddr + offset) : DK_GPU_ADDR_INVALID; + } + }; + + CIntrusiveList m_blocks; + + struct Slice + { + CIntrusiveListNode m_node; + CIntrusiveTreeNode m_treenode; + CMemPool* m_pool; + Block* m_block; + uint32_t m_start; + uint32_t m_end; + + constexpr uint32_t getSize() const { return m_end - m_start; } + constexpr bool canCoalesce(Slice const& rhs) const { return m_pool == rhs.m_pool && m_block == rhs.m_block && m_end == rhs.m_start; } + + constexpr bool operator<(Slice const& rhs) const { return getSize() < rhs.getSize(); } + constexpr bool operator<(uint32_t rhs) const { return getSize() < rhs; } + }; + + friend constexpr bool operator<(uint32_t lhs, Slice const& rhs); + + CIntrusiveList m_memMap, m_sliceHeap; + CIntrusiveTree m_freeList; + + Slice* _newSlice(); + void _deleteSlice(Slice*); + + void _destroy(Slice* slice); + +public: + static constexpr uint32_t DefaultBlockSize = 0x800000; + class Handle + { + Slice* m_slice; + public: + constexpr Handle(Slice* slice = nullptr) : m_slice{slice} { } + constexpr operator bool() const { return m_slice != nullptr; } + constexpr operator Slice*() const { return m_slice; } + constexpr bool operator!() const { return !m_slice; } + constexpr bool operator==(Handle const& rhs) const { return m_slice == rhs.m_slice; } + constexpr bool operator!=(Handle const& rhs) const { return m_slice != rhs.m_slice; } + + void destroy() + { + if (m_slice) + { + m_slice->m_pool->_destroy(m_slice); + m_slice = nullptr; + } + } + + constexpr dk::MemBlock getMemBlock() const + { + return m_slice->m_block->m_obj; + } + + constexpr uint32_t getOffset() const + { + return m_slice->m_start; + } + + constexpr uint32_t getSize() const + { + return m_slice->getSize(); + } + + constexpr void* getCpuAddr() const + { + return m_slice->m_block->cpuOffset(m_slice->m_start); + } + + constexpr DkGpuAddr getGpuAddr() const + { + return m_slice->m_block->gpuOffset(m_slice->m_start); + } + }; + + CMemPool(dk::Device dev, uint32_t flags = DkMemBlockFlags_CpuUncached | DkMemBlockFlags_GpuCached, uint32_t blockSize = DefaultBlockSize) : + m_dev{dev}, m_flags{flags}, m_blockSize{blockSize}, m_blocks{}, m_memMap{}, m_sliceHeap{}, m_freeList{} { } + ~CMemPool(); + + Handle allocate(uint32_t size, uint32_t alignment = DK_CMDMEM_ALIGNMENT); +}; + +constexpr bool operator<(uint32_t lhs, CMemPool::Slice const& rhs) +{ + return lhs < rhs.getSize(); +} diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/CShader.h b/troposphere/daybreak/nanovg/include/nanovg/framework/CShader.h new file mode 100644 index 000000000..b39dfe01b --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/CShader.h @@ -0,0 +1,31 @@ +/* +** Sample Framework for deko3d Applications +** CShader.h: Utility class for loading shaders from the filesystem +*/ +#pragma once +#include "common.h" +#include "CMemPool.h" + +class CShader +{ + dk::Shader m_shader; + CMemPool::Handle m_codemem; +public: + CShader() : m_shader{}, m_codemem{} { } + ~CShader() + { + m_codemem.destroy(); + } + + constexpr operator bool() const + { + return m_codemem; + } + + constexpr operator dk::Shader const*() const + { + return &m_shader; + } + + bool load(CMemPool& pool, const char* path); +}; diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/FileLoader.h b/troposphere/daybreak/nanovg/include/nanovg/framework/FileLoader.h new file mode 100644 index 000000000..3455c87d1 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/FileLoader.h @@ -0,0 +1,9 @@ +/* +** Sample Framework for 
deko3d Applications +** FileLoader.h: Helpers for loading data from the filesystem directly into GPU memory +*/ +#pragma once +#include "common.h" +#include "CMemPool.h" + +CMemPool::Handle LoadFile(CMemPool& pool, const char* path, uint32_t alignment = DK_CMDMEM_ALIGNMENT); diff --git a/troposphere/daybreak/nanovg/include/nanovg/framework/common.h b/troposphere/daybreak/nanovg/include/nanovg/framework/common.h new file mode 100644 index 000000000..814e49928 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/framework/common.h @@ -0,0 +1,12 @@ +/* +** Sample Framework for deko3d Applications +** common.h: Common includes +*/ +#pragma once +#include +#include +#include + +#include + +#include diff --git a/troposphere/daybreak/nanovg/include/nanovg/nanovg_gl_utils.h b/troposphere/daybreak/nanovg/include/nanovg/nanovg_gl_utils.h new file mode 100644 index 000000000..f7d73ee53 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/nanovg_gl_utils.h @@ -0,0 +1,158 @@ +// +// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. +// +#ifndef NANOVG_GL_UTILS_H +#define NANOVG_GL_UTILS_H + +#ifdef USE_OPENGL + +struct NVGLUframebuffer { + NVGcontext* ctx; + GLuint fbo; + GLuint rbo; + GLuint texture; + int image; +}; +typedef struct NVGLUframebuffer NVGLUframebuffer; + +// Helper function to create GL frame buffer to render to. +void nvgluBindFramebuffer(NVGLUframebuffer* fb); +NVGLUframebuffer* nvgluCreateFramebuffer(NVGcontext* ctx, int w, int h, int imageFlags); +void nvgluDeleteFramebuffer(NVGLUframebuffer* fb); + +#endif // NANOVG_GL_UTILS_H + +#ifdef NANOVG_GL_IMPLEMENTATION + +#if defined(NANOVG_GL3) || defined(NANOVG_GLES2) || defined(NANOVG_GLES3) +// FBO is core in OpenGL 3>. +# define NANOVG_FBO_VALID 1 +#elif defined(NANOVG_GL2) +// On OS X including glext defines FBO on GL2 too. 
+# ifdef __APPLE__ +# include +# define NANOVG_FBO_VALID 1 +# endif +#endif + +static GLint defaultFBO = -1; + +NVGLUframebuffer* nvgluCreateFramebuffer(NVGcontext* ctx, int w, int h, int imageFlags) +{ +#ifdef NANOVG_FBO_VALID + GLint defaultFBO; + GLint defaultRBO; + NVGLUframebuffer* fb = NULL; + + glGetIntegerv(GL_FRAMEBUFFER_BINDING, &defaultFBO); + glGetIntegerv(GL_RENDERBUFFER_BINDING, &defaultRBO); + + fb = (NVGLUframebuffer*)malloc(sizeof(NVGLUframebuffer)); + if (fb == NULL) goto error; + memset(fb, 0, sizeof(NVGLUframebuffer)); + + fb->image = nvgCreateImageRGBA(ctx, w, h, imageFlags | NVG_IMAGE_FLIPY | NVG_IMAGE_PREMULTIPLIED, NULL); + +#if defined NANOVG_GL2 + fb->texture = nvglImageHandleGL2(ctx, fb->image); +#elif defined NANOVG_GL3 + fb->texture = nvglImageHandleGL3(ctx, fb->image); +#elif defined NANOVG_GLES2 + fb->texture = nvglImageHandleGLES2(ctx, fb->image); +#elif defined NANOVG_GLES3 + fb->texture = nvglImageHandleGLES3(ctx, fb->image); +#endif + + fb->ctx = ctx; + + // frame buffer object + glGenFramebuffers(1, &fb->fbo); + glBindFramebuffer(GL_FRAMEBUFFER, fb->fbo); + + // render buffer object + glGenRenderbuffers(1, &fb->rbo); + glBindRenderbuffer(GL_RENDERBUFFER, fb->rbo); + glRenderbufferStorage(GL_RENDERBUFFER, GL_STENCIL_INDEX8, w, h); + + // combine all + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fb->texture, 0); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, fb->rbo); + + if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) { +#ifdef GL_DEPTH24_STENCIL8 + // If GL_STENCIL_INDEX8 is not supported, try GL_DEPTH24_STENCIL8 as a fallback. + // Some graphics cards require a depth buffer along with a stencil. + glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, w, h); + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fb->texture, 0); + glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, fb->rbo); + + if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) +#endif // GL_DEPTH24_STENCIL8 + goto error; + } + + glBindFramebuffer(GL_FRAMEBUFFER, defaultFBO); + glBindRenderbuffer(GL_RENDERBUFFER, defaultRBO); + return fb; +error: + glBindFramebuffer(GL_FRAMEBUFFER, defaultFBO); + glBindRenderbuffer(GL_RENDERBUFFER, defaultRBO); + nvgluDeleteFramebuffer(fb); + return NULL; +#else + NVG_NOTUSED(ctx); + NVG_NOTUSED(w); + NVG_NOTUSED(h); + NVG_NOTUSED(imageFlags); + return NULL; +#endif +} + +void nvgluBindFramebuffer(NVGLUframebuffer* fb) +{ +#ifdef NANOVG_FBO_VALID + if (defaultFBO == -1) glGetIntegerv(GL_FRAMEBUFFER_BINDING, &defaultFBO); + glBindFramebuffer(GL_FRAMEBUFFER, fb != NULL ? 
fb->fbo : defaultFBO); +#else + NVG_NOTUSED(fb); +#endif +} + +void nvgluDeleteFramebuffer(NVGLUframebuffer* fb) +{ +#ifdef NANOVG_FBO_VALID + if (fb == NULL) return; + if (fb->fbo != 0) + glDeleteFramebuffers(1, &fb->fbo); + if (fb->rbo != 0) + glDeleteRenderbuffers(1, &fb->rbo); + if (fb->image >= 0) + nvgDeleteImage(fb->ctx, fb->image); + fb->ctx = NULL; + fb->fbo = 0; + fb->rbo = 0; + fb->texture = 0; + fb->image = -1; + free(fb); +#else + NVG_NOTUSED(fb); +#endif +} + +#endif + +#endif // NANOVG_GL_IMPLEMENTATION diff --git a/troposphere/daybreak/nanovg/include/nanovg/stb_image.h b/troposphere/daybreak/nanovg/include/nanovg/stb_image.h new file mode 100644 index 000000000..e06f7a1d7 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/stb_image.h @@ -0,0 +1,6614 @@ +/* stb_image - v2.10 - public domain image loader - http://nothings.org/stb_image.h + no warranty implied; use at your own risk + + Do this: + #define STB_IMAGE_IMPLEMENTATION + before you include this file in *one* C or C++ file to create the implementation. + + // i.e. it should look like this: + #include ... + #include ... + #include ... + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + + You can #define STBI_ASSERT(x) before the #include to avoid using assert.h. + And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free + + + QUICK NOTES: + Primarily of interest to game developers and other people who can + avoid problematic images and only need the trivial interface + + JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib) + PNG 1/2/4/8-bit-per-channel (16 bpc not supported) + + TGA (not sure what subset, if a subset) + BMP non-1bpp, non-RLE + PSD (composited view only, no extra channels, 8/16 bit-per-channel) + + GIF (*comp always reports as 4-channel) + HDR (radiance rgbE format) + PIC (Softimage PIC) + PNM (PPM and PGM binary only) + + Animated GIF still needs a proper API, but here's one way to do it: + http://gist.github.com/urraka/685d9a6340b26b830d49 + + - decode from memory or through FILE (define STBI_NO_STDIO to remove code) + - decode from arbitrary I/O callbacks + - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON) + + Full documentation under "DOCUMENTATION" below. + + + Revision 2.00 release notes: + + - Progressive JPEG is now supported. + + - PPM and PGM binary formats are now supported, thanks to Ken Miller. + + - x86 platforms now make use of SSE2 SIMD instructions for + JPEG decoding, and ARM platforms can use NEON SIMD if requested. + This work was done by Fabian "ryg" Giesen. SSE2 is used by + default, but NEON must be enabled explicitly; see docs. + + With other JPEG optimizations included in this version, we see + 2x speedup on a JPEG on an x86 machine, and a 1.5x speedup + on a JPEG on an ARM machine, relative to previous versions of this + library. The same results will not obtain for all JPGs and for all + x86/ARM machines. (Note that progressive JPEGs are significantly + slower to decode than regular JPEGs.) This doesn't mean that this + is the fastest JPEG decoder in the land; rather, it brings it + closer to parity with standard libraries. If you want the fastest + decode, look elsewhere. (See "Philosophy" section of docs below.) + + See final bullet items below for more info on SIMD. + + - Added STBI_MALLOC, STBI_REALLOC, and STBI_FREE macros for replacing + the memory allocator. 
Unlike other STBI libraries, these macros don't + support a context parameter, so if you need to pass a context in to + the allocator, you'll have to store it in a global or a thread-local + variable. + + - Split existing STBI_NO_HDR flag into two flags, STBI_NO_HDR and + STBI_NO_LINEAR. + STBI_NO_HDR: suppress implementation of .hdr reader format + STBI_NO_LINEAR: suppress high-dynamic-range light-linear float API + + - You can suppress implementation of any of the decoders to reduce + your code footprint by #defining one or more of the following + symbols before creating the implementation. + + STBI_NO_JPEG + STBI_NO_PNG + STBI_NO_BMP + STBI_NO_PSD + STBI_NO_TGA + STBI_NO_GIF + STBI_NO_HDR + STBI_NO_PIC + STBI_NO_PNM (.ppm and .pgm) + + - You can request *only* certain decoders and suppress all other ones + (this will be more forward-compatible, as addition of new decoders + doesn't require you to disable them explicitly): + + STBI_ONLY_JPEG + STBI_ONLY_PNG + STBI_ONLY_BMP + STBI_ONLY_PSD + STBI_ONLY_TGA + STBI_ONLY_GIF + STBI_ONLY_HDR + STBI_ONLY_PIC + STBI_ONLY_PNM (.ppm and .pgm) + + Note that you can define multiples of these, and you will get all + of them ("only x" and "only y" is interpreted to mean "only x&y"). + + - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still + want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB + + - Compilation of all SIMD code can be suppressed with + #define STBI_NO_SIMD + It should not be necessary to disable SIMD unless you have issues + compiling (e.g. using an x86 compiler which doesn't support SSE + intrinsics or that doesn't support the method used to detect + SSE2 support at run-time), and even those can be reported as + bugs so I can refine the built-in compile-time checking to be + smarter. + + - The old STBI_SIMD system which allowed installing a user-defined + IDCT etc. has been removed. If you need this, don't upgrade. My + assumption is that almost nobody was doing this, and those who + were will find the built-in SIMD more satisfactory anyway. + + - RGB values computed for JPEG images are slightly different from + previous versions of stb_image. (This is due to using less + integer precision in SIMD.) The C code has been adjusted so + that the same RGB values will be computed regardless of whether + SIMD support is available, so your app should always produce + consistent results. But these results are slightly different from + previous versions. (Specifically, about 3% of available YCbCr values + will compute different RGB results from pre-1.49 versions by +-1; + most of the deviating values are one smaller in the G channel.) + + - If you must produce consistent results with previous versions of + stb_image, #define STBI_JPEG_OLD and you will get the same results + you used to; however, you will not get the SIMD speedups for + the YCbCr-to-RGB conversion step (although you should still see + significant JPEG speedup from the other changes). + + Please note that STBI_JPEG_OLD is a temporary feature; it will be + removed in future versions of the library. It is only intended for + near-term back-compatibility use. 
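
   As an illustration of the STBI_ONLY_* mechanism described above (this
   example is an addition to these notes): a project that only needs PNG and
   JPEG decoding would create the implementation in exactly one C or C++
   file like so, compiling all other decoders out:

      #define STBI_ONLY_PNG
      #define STBI_ONLY_JPEG
      #define STB_IMAGE_IMPLEMENTATION
      #include "stb_image.h"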
+ + + Latest revision history: + 2.10 (2016-01-22) avoid warning introduced in 2.09 + 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) partial animated GIF support + limited 16-bit PSD support + minor bugs, code cleanup, and compiler warnings + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) additional corruption checking + stbi_set_flip_vertically_on_load + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings + 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG + 2.00 (2014-12-25) optimize JPEG, including x86 SSE2 & ARM NEON SIMD + progressive JPEG + PGM/PPM support + STBI_MALLOC,STBI_REALLOC,STBI_FREE + STBI_NO_*, STBI_ONLY_* + GIF bugfix + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support (both grayscale and paletted) + optimize PNG + fix bug in interlaced PNG with user-specified channel count + + See end of file for full revision history. + + + ============================ Contributors ========================= + + Image formats Extensions, features + Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info) + Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info) + Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG) + Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks) + Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG) + Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip) + Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD) + urraka@github (animated gif) Junggon Kim (PNM comments) + Daniel Gibson (16-bit TGA) + + Optimizations & bugfixes + Fabian "ryg" Giesen + Arseny Kapoulkine + + Bug & warning fixes + Marc LeBlanc David Woo Guillaume George Martins Mozeiko + Christpher Lloyd Martin Golini Jerry Jansson Joseph Thomson + Dave Moore Roy Eltham Hayaki Saito Phil Jordan + Won Chun Luke Graham Johan Duparc Nathan Reed + the Horde3D community Thomas Ruf Ronny Chevalier Nick Verigakis + Janez Zemva John Bartholomew Michal Cichon svdijk@github + Jonathan Blow Ken Hamada Tero Hanninen Baldur Karlsson + Laurent Gomila Cort Stratton Sergio Gonzalez romigrou@github + Aruelien Pocheville Thibault Reuille Cass Everitt + Ryamond Barbiero Paul Du Bois Engin Manap + Blazej Dariusz Roszkowski + Michaelangel007@github + + +LICENSE + +This software is in the public domain. Where that dedication is not +recognized, you are granted a perpetual, irrevocable license to copy, +distribute, and modify this file as you see fit. + +*/ + +#ifndef STBI_INCLUDE_STB_IMAGE_H +#define STBI_INCLUDE_STB_IMAGE_H + +// DOCUMENTATION +// +// Limitations: +// - no 16-bit-per-channel PNG +// - no 12-bit-per-channel JPEG +// - no JPEGs with arithmetic coding +// - no 1-bit BMP +// - GIF always returns *comp=4 +// +// Basic usage (see HDR discussion below for HDR usage): +// int x,y,n; +// unsigned char *data = stbi_load(filename, &x, &y, &n, 0); +// // ... process data if not NULL ... +// // ... x = width, y = height, n = # 8-bit components per pixel ... +// // ... replace '0' with '1'..'4' to force that many components per pixel +// // ... 
but 'n' will always be the number that it would have been if you said 0 +// stbi_image_free(data) +// +// Standard parameters: +// int *x -- outputs image width in pixels +// int *y -- outputs image height in pixels +// int *comp -- outputs # of image components in image file +// int req_comp -- if non-zero, # of image components requested in result +// +// The return value from an image loader is an 'unsigned char *' which points +// to the pixel data, or NULL on an allocation failure or if the image is +// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels, +// with each pixel consisting of N interleaved 8-bit components; the first +// pixel pointed to is top-left-most in the image. There is no padding between +// image scanlines or between pixels, regardless of format. The number of +// components N is 'req_comp' if req_comp is non-zero, or *comp otherwise. +// If req_comp is non-zero, *comp has the number of components that _would_ +// have been output otherwise. E.g. if you set req_comp to 4, you will always +// get RGBA output, but you can check *comp to see if it's trivially opaque +// because e.g. there were only 3 channels in the source image. +// +// An output image with N components has the following components interleaved +// in this order in each pixel: +// +// N=#comp components +// 1 grey +// 2 grey, alpha +// 3 red, green, blue +// 4 red, green, blue, alpha +// +// If image loading fails for any reason, the return value will be NULL, +// and *x, *y, *comp will be unchanged. The function stbi_failure_reason() +// can be queried for an extremely brief, end-user unfriendly explanation +// of why the load failed. Define STBI_NO_FAILURE_STRINGS to avoid +// compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly +// more user-friendly ones. +// +// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized. +// +// =========================================================================== +// +// Philosophy +// +// stb libraries are designed with the following priorities: +// +// 1. easy to use +// 2. easy to maintain +// 3. good performance +// +// Sometimes I let "good performance" creep up in priority over "easy to maintain", +// and for best performance I may provide less-easy-to-use APIs that give higher +// performance, in addition to the easy to use ones. Nevertheless, it's important +// to keep in mind that from the standpoint of you, a client of this library, +// all you care about is #1 and #3, and stb libraries do not emphasize #3 above all. +// +// Some secondary priorities arise directly from the first two, some of which +// make more explicit reasons why performance can't be emphasized. +// +// - Portable ("ease of use") +// - Small footprint ("easy to maintain") +// - No dependencies ("ease of use") +// +// =========================================================================== +// +// I/O callbacks +// +// I/O callbacks allow you to read from arbitrary sources, like packaged +// files or some other source. Data read from callbacks are processed +// through a small internal buffer (currently 128 bytes) to try to reduce +// overhead. +// +// The three functions you must define are "read" (reads some bytes of data), +// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end). +// +// =========================================================================== +// +// SIMD support +// +// The JPEG decoder will try to automatically use SIMD kernels on x86 when +// supported by the compiler. 
For ARM Neon support, you must explicitly +// request it. +// +// (The old do-it-yourself SIMD API is no longer supported in the current +// code.) +// +// On x86, SSE2 will automatically be used when available based on a run-time +// test; if not, the generic C versions are used as a fall-back. On ARM targets, +// the typical path is to have separate builds for NEON and non-NEON devices +// (at least this is true for iOS and Android). Therefore, the NEON support is +// toggled by a build flag: define STBI_NEON to get NEON loops. +// +// The output of the JPEG decoder is slightly different from versions where +// SIMD support was introduced (that is, for versions before 1.49). The +// difference is only +-1 in the 8-bit RGB channels, and only on a small +// fraction of pixels. You can force the pre-1.49 behavior by defining +// STBI_JPEG_OLD, but this will disable some of the SIMD decoding path +// and hence cost some performance. +// +// If for some reason you do not want to use any of SIMD code, or if +// you have issues compiling it, you can disable it entirely by +// defining STBI_NO_SIMD. +// +// =========================================================================== +// +// HDR image support (disable by defining STBI_NO_HDR) +// +// stb_image now supports loading HDR images in general, and currently +// the Radiance .HDR file format, although the support is provided +// generically. You can still load any file through the existing interface; +// if you attempt to load an HDR file, it will be automatically remapped to +// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1; +// both of these constants can be reconfigured through this interface: +// +// stbi_hdr_to_ldr_gamma(2.2f); +// stbi_hdr_to_ldr_scale(1.0f); +// +// (note, do not use _inverse_ constants; stbi_image will invert them +// appropriately). +// +// Additionally, there is a new, parallel interface for loading files as +// (linear) floats to preserve the full dynamic range: +// +// float *data = stbi_loadf(filename, &x, &y, &n, 0); +// +// If you load LDR images through this interface, those images will +// be promoted to floating point values, run through the inverse of +// constants corresponding to the above: +// +// stbi_ldr_to_hdr_scale(1.0f); +// stbi_ldr_to_hdr_gamma(2.2f); +// +// Finally, given a filename (or an open file or memory block--see header +// file for details) containing image data, you can query for the "most +// appropriate" interface to use (that is, whether the image is HDR or +// not), using: +// +// stbi_is_hdr(char *filename); +// +// =========================================================================== +// +// iPhone PNG support: +// +// By default we convert iphone-formatted PNGs back to RGB, even though +// they are internally encoded differently. You can disable this conversion +// by by calling stbi_convert_iphone_png_to_rgb(0), in which case +// you will always just get the native iphone "format" through (which +// is BGR stored in RGB). +// +// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per +// pixel to remove any premultiplied alpha *only* if the image file explicitly +// says there's premultiplied data (currently only happens in iPhone images, +// and only if iPhone convert-to-rgb processing is on). 
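//
// As an illustration of the I/O-callback interface described above (this
// sketch is an addition to these notes; the callback names are arbitrary),
// reading from a standard FILE* could look like:
//
//    static int  cb_read(void *user, char *data, int size) { return (int) fread(data, 1, size, (FILE *) user); }
//    static void cb_skip(void *user, int n)                { fseek((FILE *) user, n, SEEK_CUR); }
//    static int  cb_eof (void *user)                       { return feof((FILE *) user); }
//
//    stbi_io_callbacks cb = { cb_read, cb_skip, cb_eof };
//    unsigned char *data = stbi_load_from_callbacks(&cb, (void *) f, &x, &y, &n, 0);
//
// This is equivalent to what stbi_load_from_file does internally via the
// stdio callbacks defined later in this file.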
+// + + +#ifndef STBI_NO_STDIO +#include +#endif // STBI_NO_STDIO + +#define STBI_VERSION 1 + +enum +{ + STBI_default = 0, // only used for req_comp + + STBI_grey = 1, + STBI_grey_alpha = 2, + STBI_rgb = 3, + STBI_rgb_alpha = 4 +}; + +typedef unsigned char stbi_uc; + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef STB_IMAGE_STATIC +#define STBIDEF static +#else +#define STBIDEF extern +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// PRIMARY API - works on images of any type +// + +// +// load image by filename, open file, or memory buffer +// + +typedef struct +{ + int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. return number of bytes actually read + void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative + int (*eof) (void *user); // returns nonzero if we are at end of file/data +} stbi_io_callbacks; + +STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *comp, int req_comp); +STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *comp, int req_comp); +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *comp, int req_comp); + +#ifndef STBI_NO_STDIO +STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *comp, int req_comp); +// for stbi_load_from_file, file pointer is left pointing immediately after image +#endif + +#ifndef STBI_NO_LINEAR + STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *comp, int req_comp); + STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp); + STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp); + + #ifndef STBI_NO_STDIO + STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *comp, int req_comp); + #endif +#endif + +#ifndef STBI_NO_HDR + STBIDEF void stbi_hdr_to_ldr_gamma(float gamma); + STBIDEF void stbi_hdr_to_ldr_scale(float scale); +#endif // STBI_NO_HDR + +#ifndef STBI_NO_LINEAR + STBIDEF void stbi_ldr_to_hdr_gamma(float gamma); + STBIDEF void stbi_ldr_to_hdr_scale(float scale); +#endif // STBI_NO_LINEAR + +// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user); +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len); +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename); +STBIDEF int stbi_is_hdr_from_file(FILE *f); +#endif // STBI_NO_STDIO + + +// get a VERY brief reason for failure +// NOT THREADSAFE +STBIDEF const char *stbi_failure_reason (void); + +// free the loaded image -- this is just free() +STBIDEF void stbi_image_free (void *retval_from_stbi_load); + +// get image dimensions & components without fully decoding +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp); + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp); +STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp); + +#endif + + + +// for image formats that explicitly notate that they have premultiplied alpha, +// we just return the colors as stored in the file. set this flag to force +// unpremultiplication. 
results are undefined if the unpremultiply overflow. +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply); + +// indicate whether we should process iphone images back to canonical format, +// or just pass them through "as-is" +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert); + +// flip the image vertically, so the first pixel in the output array is the bottom left +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip); + +// ZLIB client - used by PNG, available for other purposes + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen); +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header); +STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + +STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen); +STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen); + + +#ifdef __cplusplus +} +#endif + +// +// +//// end header file ///////////////////////////////////////////////////// +#endif // STBI_INCLUDE_STB_IMAGE_H + +#ifdef STB_IMAGE_IMPLEMENTATION + +#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \ + || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \ + || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \ + || defined(STBI_ONLY_ZLIB) + #ifndef STBI_ONLY_JPEG + #define STBI_NO_JPEG + #endif + #ifndef STBI_ONLY_PNG + #define STBI_NO_PNG + #endif + #ifndef STBI_ONLY_BMP + #define STBI_NO_BMP + #endif + #ifndef STBI_ONLY_PSD + #define STBI_NO_PSD + #endif + #ifndef STBI_ONLY_TGA + #define STBI_NO_TGA + #endif + #ifndef STBI_ONLY_GIF + #define STBI_NO_GIF + #endif + #ifndef STBI_ONLY_HDR + #define STBI_NO_HDR + #endif + #ifndef STBI_ONLY_PIC + #define STBI_NO_PIC + #endif + #ifndef STBI_ONLY_PNM + #define STBI_NO_PNM + #endif +#endif + +#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB) +#define STBI_NO_ZLIB +#endif + + +#include +#include // ptrdiff_t on osx +#include +#include + +#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR) +#include // ldexp +#endif + +#ifndef STBI_NO_STDIO +#include +#endif + +#ifndef STBI_ASSERT +#include +#define STBI_ASSERT(x) assert(x) +#endif + + +#ifndef _MSC_VER + #ifdef __cplusplus + #define stbi_inline inline + #else + #define stbi_inline + #endif +#else + #define stbi_inline __forceinline +#endif + + +#ifdef _MSC_VER +typedef unsigned short stbi__uint16; +typedef signed short stbi__int16; +typedef unsigned int stbi__uint32; +typedef signed int stbi__int32; +#else +#include +typedef uint16_t stbi__uint16; +typedef int16_t stbi__int16; +typedef uint32_t stbi__uint32; +typedef int32_t stbi__int32; +#endif + +// should produce compiler error if size is wrong +typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 
1 : -1]; + +#ifdef _MSC_VER +#define STBI_NOTUSED(v) (void)(v) +#else +#define STBI_NOTUSED(v) (void)sizeof(v) +#endif + +#ifdef _MSC_VER +#define STBI_HAS_LROTL +#endif + +#ifdef STBI_HAS_LROTL + #define stbi_lrot(x,y) _lrotl(x,y) +#else + #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y)))) +#endif + +#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED)) +// ok +#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED) +// ok +#else +#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)." +#endif + +#ifndef STBI_MALLOC +#define STBI_MALLOC(sz) malloc(sz) +#define STBI_REALLOC(p,newsz) realloc(p,newsz) +#define STBI_FREE(p) free(p) +#endif + +#ifndef STBI_REALLOC_SIZED +#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz) +#endif + +// x86/x64 detection +#if defined(__x86_64__) || defined(_M_X64) +#define STBI__X64_TARGET +#elif defined(__i386) || defined(_M_IX86) +#define STBI__X86_TARGET +#endif + +#if defined(__GNUC__) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET)) && !defined(__SSE2__) && !defined(STBI_NO_SIMD) +// NOTE: not clear do we actually need this for the 64-bit path? +// gcc doesn't support sse2 intrinsics unless you compile with -msse2, +// (but compiling with -msse2 allows the compiler to use SSE2 everywhere; +// this is just broken and gcc are jerks for not fixing it properly +// http://www.virtualdub.org/blog/pivot/entry.php?id=363 ) +#define STBI_NO_SIMD +#endif + +#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD) +// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET +// +// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the +// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant. +// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not +// simultaneously enabling "-mstackrealign". +// +// See https://github.com/nothings/stb/issues/81 for more information. +// +// So default to no SSE2 on 32-bit MinGW. If you've read this far and added +// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2. +#define STBI_NO_SIMD +#endif + +#if !defined(STBI_NO_SIMD) && defined(STBI__X86_TARGET) +#define STBI_SSE2 +#include + +#ifdef _MSC_VER + +#if _MSC_VER >= 1400 // not VC6 +#include // __cpuid +static int stbi__cpuid3(void) +{ + int info[4]; + __cpuid(info,1); + return info[3]; +} +#else +static int stbi__cpuid3(void) +{ + int res; + __asm { + mov eax,1 + cpuid + mov res,edx + } + return res; +} +#endif + +#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name + +static int stbi__sse2_available() +{ + int info3 = stbi__cpuid3(); + return ((info3 >> 26) & 1) != 0; +} +#else // assume GCC-style if not VC++ +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) + +static int stbi__sse2_available() +{ +#if defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 // GCC 4.8 or later + // GCC 4.8+ has a nice way to do this + return __builtin_cpu_supports("sse2"); +#else + // portable way to do this, preferably without using GCC inline ASM? + // just bail for now. 
+ return 0; +#endif +} +#endif +#endif + +// ARM NEON +#if defined(STBI_NO_SIMD) && defined(STBI_NEON) +#undef STBI_NEON +#endif + +#ifdef STBI_NEON +#include +// assume GCC or Clang on ARM targets +#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16))) +#endif + +#ifndef STBI_SIMD_ALIGN +#define STBI_SIMD_ALIGN(type, name) type name +#endif + +/////////////////////////////////////////////// +// +// stbi__context struct and start_xxx functions + +// stbi__context structure is our basic context used by all images, so it +// contains all the IO context, plus some basic image information +typedef struct +{ + stbi__uint32 img_x, img_y; + int img_n, img_out_n; + + stbi_io_callbacks io; + void *io_user_data; + + int read_from_callbacks; + int buflen; + stbi_uc buffer_start[128]; + + stbi_uc *img_buffer, *img_buffer_end; + stbi_uc *img_buffer_original, *img_buffer_original_end; +} stbi__context; + + +static void stbi__refill_buffer(stbi__context *s); + +// initialize a memory-decode context +static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len) +{ + s->io.read = NULL; + s->read_from_callbacks = 0; + s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer; + s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len; +} + +// initialize a callback-based context +static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user) +{ + s->io = *c; + s->io_user_data = user; + s->buflen = sizeof(s->buffer_start); + s->read_from_callbacks = 1; + s->img_buffer_original = s->buffer_start; + stbi__refill_buffer(s); + s->img_buffer_original_end = s->img_buffer_end; +} + +#ifndef STBI_NO_STDIO + +static int stbi__stdio_read(void *user, char *data, int size) +{ + return (int) fread(data,1,size,(FILE*) user); +} + +static void stbi__stdio_skip(void *user, int n) +{ + fseek((FILE*) user, n, SEEK_CUR); +} + +static int stbi__stdio_eof(void *user) +{ + return feof((FILE*) user); +} + +static stbi_io_callbacks stbi__stdio_callbacks = +{ + stbi__stdio_read, + stbi__stdio_skip, + stbi__stdio_eof, +}; + +static void stbi__start_file(stbi__context *s, FILE *f) +{ + stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f); +} + +//static void stop_file(stbi__context *s) { } + +#endif // !STBI_NO_STDIO + +static void stbi__rewind(stbi__context *s) +{ + // conceptually rewind SHOULD rewind to the beginning of the stream, + // but we just rewind to the beginning of the initial buffer, because + // we only use it after doing 'test', which only ever looks at at most 92 bytes + s->img_buffer = s->img_buffer_original; + s->img_buffer_end = s->img_buffer_original_end; +} + +#ifndef STBI_NO_JPEG +static int stbi__jpeg_test(stbi__context *s); +static stbi_uc *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNG +static int stbi__png_test(stbi__context *s); +static stbi_uc *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_BMP +static int stbi__bmp_test(stbi__context *s); +static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_TGA +static int stbi__tga_test(stbi__context *s); +static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); 
+static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s); +static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_HDR +static int stbi__hdr_test(stbi__context *s); +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_test(stbi__context *s); +static stbi_uc *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_GIF +static int stbi__gif_test(stbi__context *s); +static stbi_uc *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +#ifndef STBI_NO_PNM +static int stbi__pnm_test(stbi__context *s); +static stbi_uc *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp); +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp); +#endif + +// this is not threadsafe +static const char *stbi__g_failure_reason; + +STBIDEF const char *stbi_failure_reason(void) +{ + return stbi__g_failure_reason; +} + +static int stbi__err(const char *str) +{ + stbi__g_failure_reason = str; + return 0; +} + +static void *stbi__malloc(size_t size) +{ + return STBI_MALLOC(size); +} + +// stbi__err - error +// stbi__errpf - error returning pointer to float +// stbi__errpuc - error returning pointer to unsigned char + +#ifdef STBI_NO_FAILURE_STRINGS + #define stbi__err(x,y) 0 +#elif defined(STBI_FAILURE_USERMSG) + #define stbi__err(x,y) stbi__err(y) +#else + #define stbi__err(x,y) stbi__err(x) +#endif + +#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL)) +#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL)) + +STBIDEF void stbi_image_free(void *retval_from_stbi_load) +{ + STBI_FREE(retval_from_stbi_load); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp); +#endif + +#ifndef STBI_NO_HDR +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp); +#endif + +static int stbi__vertically_flip_on_load = 0; + +STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip) +{ + stbi__vertically_flip_on_load = flag_true_if_should_flip; +} + +static unsigned char *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp); + #endif + #ifndef STBI_NO_PNG + if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp); + #endif + #ifndef STBI_NO_BMP + if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp); + #endif + #ifndef STBI_NO_GIF + if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp); + #endif + #ifndef STBI_NO_PSD + if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp); + #endif + #ifndef STBI_NO_PIC + if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp); + #endif + #ifndef STBI_NO_PNM + if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp); + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr = stbi__hdr_load(s, x,y,comp,req_comp); + return stbi__hdr_to_ldr(hdr, *x, *y, 
req_comp ? req_comp : *comp); + } + #endif + + #ifndef STBI_NO_TGA + // test tga last because it's a crappy test! + if (stbi__tga_test(s)) + return stbi__tga_load(s,x,y,comp,req_comp); + #endif + + return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt"); +} + +static unsigned char *stbi__load_flip(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result = stbi__load_main(s, x, y, comp, req_comp); + + if (stbi__vertically_flip_on_load && result != NULL) { + int w = *x, h = *y; + int depth = req_comp ? req_comp : *comp; + int row,col,z; + stbi_uc temp; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h>>1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < depth; z++) { + temp = result[(row * w + col) * depth + z]; + result[(row * w + col) * depth + z] = result[((h - row - 1) * w + col) * depth + z]; + result[((h - row - 1) * w + col) * depth + z] = temp; + } + } + } + } + + return result; +} + +#ifndef STBI_NO_HDR +static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp) +{ + if (stbi__vertically_flip_on_load && result != NULL) { + int w = *x, h = *y; + int depth = req_comp ? req_comp : *comp; + int row,col,z; + float temp; + + // @OPTIMIZE: use a bigger temp buffer and memcpy multiple pixels at once + for (row = 0; row < (h>>1); row++) { + for (col = 0; col < w; col++) { + for (z = 0; z < depth; z++) { + temp = result[(row * w + col) * depth + z]; + result[(row * w + col) * depth + z] = result[((h - row - 1) * w + col) * depth + z]; + result[((h - row - 1) * w + col) * depth + z] = temp; + } + } + } + } +} +#endif + +#ifndef STBI_NO_STDIO + +static FILE *stbi__fopen(char const *filename, char const *mode) +{ + FILE *f; +#if defined(_MSC_VER) && _MSC_VER >= 1400 + if (0 != fopen_s(&f, filename, mode)) + f=0; +#else + f = fopen(filename, mode); +#endif + return f; +} + + +STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + unsigned char *result; + if (!f) return stbi__errpuc("can't fopen", "Unable to open file"); + result = stbi_load_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *result; + stbi__context s; + stbi__start_file(&s,f); + result = stbi__load_flip(&s,x,y,comp,req_comp); + if (result) { + // need to 'unget' all the characters in the IO buffer + fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR); + } + return result; +} +#endif //!STBI_NO_STDIO + +STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__load_flip(&s,x,y,comp,req_comp); +} + +STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__load_flip(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_LINEAR +static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + unsigned char *data; + #ifndef STBI_NO_HDR + if (stbi__hdr_test(s)) { + float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp); + if (hdr_data) + stbi__float_postprocess(hdr_data,x,y,comp,req_comp); + return hdr_data; + } + #endif + data = stbi__load_flip(s, x, y, comp, 
req_comp); + if (data) + return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp); + return stbi__errpf("unknown image type", "Image not of any known type, or corrupt"); +} + +STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} + +#ifndef STBI_NO_STDIO +STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp) +{ + float *result; + FILE *f = stbi__fopen(filename, "rb"); + if (!f) return stbi__errpf("can't fopen", "Unable to open file"); + result = stbi_loadf_from_file(f,x,y,comp,req_comp); + fclose(f); + return result; +} + +STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp) +{ + stbi__context s; + stbi__start_file(&s,f); + return stbi__loadf_main(&s,x,y,comp,req_comp); +} +#endif // !STBI_NO_STDIO + +#endif // !STBI_NO_LINEAR + +// these is-hdr-or-not is defined independent of whether STBI_NO_LINEAR is +// defined, for API simplicity; if STBI_NO_LINEAR is defined, it always +// reports false! + +STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(buffer); + STBI_NOTUSED(len); + return 0; + #endif +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_is_hdr (char const *filename) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result=0; + if (f) { + result = stbi_is_hdr_from_file(f); + fclose(f); + } + return result; +} + +STBIDEF int stbi_is_hdr_from_file(FILE *f) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_file(&s,f); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(f); + return 0; + #endif +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user) +{ + #ifndef STBI_NO_HDR + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user); + return stbi__hdr_test(&s); + #else + STBI_NOTUSED(clbk); + STBI_NOTUSED(user); + return 0; + #endif +} + +#ifndef STBI_NO_LINEAR +static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f; + +STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; } +STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; } +#endif + +static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f; + +STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; } +STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; } + + +////////////////////////////////////////////////////////////////////////////// +// +// Common code used by all image loaders +// + +enum +{ + STBI__SCAN_load=0, + STBI__SCAN_type, + STBI__SCAN_header +}; + +static void stbi__refill_buffer(stbi__context *s) +{ + int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen); + if (n == 0) { + // at end of file, treat same as if from memory, but need to handle case + // where s->img_buffer isn't pointing to safe memory, e.g. 
0-byte file + s->read_from_callbacks = 0; + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start+1; + *s->img_buffer = 0; + } else { + s->img_buffer = s->buffer_start; + s->img_buffer_end = s->buffer_start + n; + } +} + +stbi_inline static stbi_uc stbi__get8(stbi__context *s) +{ + if (s->img_buffer < s->img_buffer_end) + return *s->img_buffer++; + if (s->read_from_callbacks) { + stbi__refill_buffer(s); + return *s->img_buffer++; + } + return 0; +} + +stbi_inline static int stbi__at_eof(stbi__context *s) +{ + if (s->io.read) { + if (!(s->io.eof)(s->io_user_data)) return 0; + // if feof() is true, check if buffer = end + // special case: we've only got the special 0 character at the end + if (s->read_from_callbacks == 0) return 1; + } + + return s->img_buffer >= s->img_buffer_end; +} + +static void stbi__skip(stbi__context *s, int n) +{ + if (n < 0) { + s->img_buffer = s->img_buffer_end; + return; + } + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + s->img_buffer = s->img_buffer_end; + (s->io.skip)(s->io_user_data, n - blen); + return; + } + } + s->img_buffer += n; +} + +static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n) +{ + if (s->io.read) { + int blen = (int) (s->img_buffer_end - s->img_buffer); + if (blen < n) { + int res, count; + + memcpy(buffer, s->img_buffer, blen); + + count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen); + res = (count == (n-blen)); + s->img_buffer = s->img_buffer_end; + return res; + } + } + + if (s->img_buffer+n <= s->img_buffer_end) { + memcpy(buffer, s->img_buffer, n); + s->img_buffer += n; + return 1; + } else + return 0; +} + +static int stbi__get16be(stbi__context *s) +{ + int z = stbi__get8(s); + return (z << 8) + stbi__get8(s); +} + +static stbi__uint32 stbi__get32be(stbi__context *s) +{ + stbi__uint32 z = stbi__get16be(s); + return (z << 16) + stbi__get16be(s); +} + +#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF) +// nothing +#else +static int stbi__get16le(stbi__context *s) +{ + int z = stbi__get8(s); + return z + (stbi__get8(s) << 8); +} +#endif + +#ifndef STBI_NO_BMP +static stbi__uint32 stbi__get32le(stbi__context *s) +{ + stbi__uint32 z = stbi__get16le(s); + return z + (stbi__get16le(s) << 16); +} +#endif + +#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings + + +////////////////////////////////////////////////////////////////////////////// +// +// generic converter from built-in img_n to req_comp +// individual types do this automatically as much as possible (e.g. jpeg +// does all cases internally since it needs to colorspace convert anyway, +// and it never has alpha, so very few cases ). 
png can automatically +// interleave an alpha=255 channel, but falls back to this for other cases +// +// assume data buffer is malloced, so malloc a new one and free that one +// only failure mode is malloc failing + +static stbi_uc stbi__compute_y(int r, int g, int b) +{ + return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8); +} + +static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y) +{ + int i,j; + unsigned char *good; + + if (req_comp == img_n) return data; + STBI_ASSERT(req_comp >= 1 && req_comp <= 4); + + good = (unsigned char *) stbi__malloc(req_comp * x * y); + if (good == NULL) { + STBI_FREE(data); + return stbi__errpuc("outofmem", "Out of memory"); + } + + for (j=0; j < (int) y; ++j) { + unsigned char *src = data + j * x * img_n ; + unsigned char *dest = good + j * x * req_comp; + + #define COMBO(a,b) ((a)*8+(b)) + #define CASE(a,b) case COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b) + // convert source image with img_n components to one with req_comp components; + // avoid switch per pixel, so use switch per scanline and massive macros + switch (COMBO(img_n, req_comp)) { + CASE(1,2) dest[0]=src[0], dest[1]=255; break; + CASE(1,3) dest[0]=dest[1]=dest[2]=src[0]; break; + CASE(1,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; break; + CASE(2,1) dest[0]=src[0]; break; + CASE(2,3) dest[0]=dest[1]=dest[2]=src[0]; break; + CASE(2,4) dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; break; + CASE(3,4) dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; break; + CASE(3,1) dest[0]=stbi__compute_y(src[0],src[1],src[2]); break; + CASE(3,2) dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; break; + CASE(4,1) dest[0]=stbi__compute_y(src[0],src[1],src[2]); break; + CASE(4,2) dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; break; + CASE(4,3) dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; break; + default: STBI_ASSERT(0); + } + #undef CASE + } + + STBI_FREE(data); + return good; +} + +#ifndef STBI_NO_LINEAR +static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp) +{ + int i,k,n; + float *output = (float *) stbi__malloc(x * y * comp * sizeof(float)); + if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale); + } + if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f; + } + STBI_FREE(data); + return output; +} +#endif + +#ifndef STBI_NO_HDR +#define stbi__float2int(x) ((int) (x)) +static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp) +{ + int i,k,n; + stbi_uc *output = (stbi_uc *) stbi__malloc(x * y * comp); + if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); } + // compute number of non-alpha components + if (comp & 1) n = comp; else n = comp-1; + for (i=0; i < x*y; ++i) { + for (k=0; k < n; ++k) { + float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + if (k < comp) { + float z = data[i*comp+k] * 255 + 0.5f; + if (z < 0) z = 0; + if (z > 255) z = 255; + output[i*comp + k] = (stbi_uc) stbi__float2int(z); + } + } + STBI_FREE(data); + return output; +} +#endif + 
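The loaders defined above (stbi_load, stbi_load_from_file, stbi_load_from_memory, stbi_load_from_callbacks) all funnel through stbi__load_flip and hand back a tightly packed 8-bit buffer that the caller releases with stbi_image_free. A minimal usage sketch, assuming the compressed image is already sitting in a memory buffer (the helper name and buffer are illustrative only, not part of this diff):

    #include <stdio.h>
    #include "stb_image.h"  /* one translation unit must define STB_IMAGE_IMPLEMENTATION, per the usual stb convention */

    /* Hypothetical helper: decode an in-memory image into RGBA8 and release it. */
    static void decode_example(const unsigned char *buf, int len)
    {
        int w, h, channels_in_file;
        unsigned char *pixels = stbi_load_from_memory(buf, len, &w, &h, &channels_in_file, 4);
        if (pixels == NULL) {
            printf("decode failed: %s\n", stbi_failure_reason());
            return;
        }
        /* pixels now holds w*h*4 bytes of RGBA, top row first unless
           stbi_set_flip_vertically_on_load(1) was called beforehand. */
        stbi_image_free(pixels);
    }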
+////////////////////////////////////////////////////////////////////////////// +// +// "baseline" JPEG/JFIF decoder +// +// simple implementation +// - doesn't support delayed output of y-dimension +// - simple interface (only one output format: 8-bit interleaved RGB) +// - doesn't try to recover corrupt jpegs +// - doesn't allow partial loading, loading multiple at once +// - still fast on x86 (copying globals into locals doesn't help x86) +// - allocates lots of intermediate memory (full size of all components) +// - non-interleaved case requires this anyway +// - allows good upsampling (see next) +// high-quality +// - upsampled channels are bilinearly interpolated, even across blocks +// - quality integer IDCT derived from IJG's 'slow' +// performance +// - fast huffman; reasonable integer IDCT +// - some SIMD kernels for common paths on targets with SSE2/NEON +// - uses a lot of intermediate memory, could cache poorly + +#ifndef STBI_NO_JPEG + +// huffman decoding acceleration +#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache + +typedef struct +{ + stbi_uc fast[1 << FAST_BITS]; + // weirdly, repacking this into AoS is a 10% speed loss, instead of a win + stbi__uint16 code[256]; + stbi_uc values[256]; + stbi_uc size[257]; + unsigned int maxcode[18]; + int delta[17]; // old 'firstsymbol' - old 'firstcode' +} stbi__huffman; + +typedef struct +{ + stbi__context *s; + stbi__huffman huff_dc[4]; + stbi__huffman huff_ac[4]; + stbi_uc dequant[4][64]; + stbi__int16 fast_ac[4][1 << FAST_BITS]; + +// sizes for components, interleaved MCUs + int img_h_max, img_v_max; + int img_mcu_x, img_mcu_y; + int img_mcu_w, img_mcu_h; + +// definition of jpeg image component + struct + { + int id; + int h,v; + int tq; + int hd,ha; + int dc_pred; + + int x,y,w2,h2; + stbi_uc *data; + void *raw_data, *raw_coeff; + stbi_uc *linebuf; + short *coeff; // progressive only + int coeff_w, coeff_h; // number of 8x8 coefficient blocks + } img_comp[4]; + + stbi__uint32 code_buffer; // jpeg entropy-coded buffer + int code_bits; // number of valid bits + unsigned char marker; // marker seen while filling entropy buffer + int nomore; // flag if we saw a marker so must stop + + int progressive; + int spec_start; + int spec_end; + int succ_high; + int succ_low; + int eob_run; + + int scan_n, order[4]; + int restart_interval, todo; + +// kernels + void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]); + void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step); + stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs); +} stbi__jpeg; + +static int stbi__build_huffman(stbi__huffman *h, int *count) +{ + int i,j,k=0,code; + // build size list for each symbol (from JPEG spec) + for (i=0; i < 16; ++i) + for (j=0; j < count[i]; ++j) + h->size[k++] = (stbi_uc) (i+1); + h->size[k] = 0; + + // compute actual symbols (from jpeg spec) + code = 0; + k = 0; + for(j=1; j <= 16; ++j) { + // compute delta to add to code to compute symbol id + h->delta[j] = k - code; + if (h->size[k] == j) { + while (h->size[k] == j) + h->code[k++] = (stbi__uint16) (code++); + if (code-1 >= (1 << j)) return stbi__err("bad code lengths","Corrupt JPEG"); + } + // compute largest code + 1 for this size, preshifted as needed later + h->maxcode[j] = code << (16-j); + code <<= 1; + } + h->maxcode[j] = 0xffffffff; + + // build non-spec acceleration table; 255 is flag for not-accelerated + memset(h->fast, 255, 1 << 
FAST_BITS); + for (i=0; i < k; ++i) { + int s = h->size[i]; + if (s <= FAST_BITS) { + int c = h->code[i] << (FAST_BITS-s); + int m = 1 << (FAST_BITS-s); + for (j=0; j < m; ++j) { + h->fast[c+j] = (stbi_uc) i; + } + } + } + return 1; +} + +// build a table that decodes both magnitude and value of small ACs in +// one go. +static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h) +{ + int i; + for (i=0; i < (1 << FAST_BITS); ++i) { + stbi_uc fast = h->fast[i]; + fast_ac[i] = 0; + if (fast < 255) { + int rs = h->values[fast]; + int run = (rs >> 4) & 15; + int magbits = rs & 15; + int len = h->size[fast]; + + if (magbits && len + magbits <= FAST_BITS) { + // magnitude code followed by receive_extend code + int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits); + int m = 1 << (magbits - 1); + if (k < m) k += (-1 << magbits) + 1; + // if the result is small enough, we can fit it in fast_ac table + if (k >= -128 && k <= 127) + fast_ac[i] = (stbi__int16) ((k << 8) + (run << 4) + (len + magbits)); + } + } + } +} + +static void stbi__grow_buffer_unsafe(stbi__jpeg *j) +{ + do { + int b = j->nomore ? 0 : stbi__get8(j->s); + if (b == 0xff) { + int c = stbi__get8(j->s); + if (c != 0) { + j->marker = (unsigned char) c; + j->nomore = 1; + return; + } + } + j->code_buffer |= b << (24 - j->code_bits); + j->code_bits += 8; + } while (j->code_bits <= 24); +} + +// (1 << n) - 1 +static stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535}; + +// decode a jpeg huffman value from the bitstream +stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) +{ + unsigned int temp; + int c,k; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + // look at the top FAST_BITS and determine what symbol ID it is, + // if the code is <= FAST_BITS + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + k = h->fast[c]; + if (k < 255) { + int s = h->size[k]; + if (s > j->code_bits) + return -1; + j->code_buffer <<= s; + j->code_bits -= s; + return h->values[k]; + } + + // naive test is to shift the code_buffer down so k bits are + // valid, then test against maxcode. To speed this up, we've + // preshifted maxcode left so that it has (16-k) 0s at the + // end; in other words, regardless of the number of bits, it + // wants to be compared against something shifted to have 16; + // that way we don't need to shift inside the loop. + temp = j->code_buffer >> 16; + for (k=FAST_BITS+1 ; ; ++k) + if (temp < h->maxcode[k]) + break; + if (k == 17) { + // error! 
code not found + j->code_bits -= 16; + return -1; + } + + if (k > j->code_bits) + return -1; + + // convert the huffman code to the symbol id + c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); + + // convert the id to a symbol + j->code_bits -= k; + j->code_buffer <<= k; + return h->values[c]; +} + +// bias[n] = (-1<<n) + 1 +static int const stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767}; + +// combined JPEG 'receive' and JPEG 'extend', since baseline +// always extends everything it receives. +stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) +{ + unsigned int k; + int sgn; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + + sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB + k = stbi_lrot(j->code_buffer, n); + STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask))); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k + (stbi__jbias[n] & ~sgn); +} + +// get some unsigned bits +stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) +{ + unsigned int k; + if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + k = stbi_lrot(j->code_buffer, n); + j->code_buffer = k & ~stbi__bmask[n]; + k &= stbi__bmask[n]; + j->code_bits -= n; + return k; +} + +stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) +{ + unsigned int k; + if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + k = j->code_buffer; + j->code_buffer <<= 1; + --j->code_bits; + return k & 0x80000000; +} + +// given a value that's at position X in the zigzag stream, +// where does it appear in the 8x8 matrix coded as row-major? +static stbi_uc stbi__jpeg_dezigzag[64+15] = +{ + 0, 1, 8, 16, 9, 2, 3, 10, + 17, 24, 32, 25, 18, 11, 4, 5, + 12, 19, 26, 33, 40, 48, 41, 34, + 27, 20, 13, 6, 7, 14, 21, 28, + 35, 42, 49, 56, 57, 50, 43, 36, + 29, 22, 15, 23, 30, 37, 44, 51, + 58, 59, 52, 45, 38, 31, 39, 46, + 53, 60, 61, 54, 47, 55, 62, 63, + // let corrupt input sample past end + 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63 +}; + +// decode one 64-entry block-- +static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi_uc *dequant) +{ + int diff,dc,k; + int t; + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + t = stbi__jpeg_huff_decode(j, hdc); + if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + + // 0 all the ac values now so we can do it 32-bits at a time + memset(data,0,64*sizeof(data[0])); + + diff = t ?
stbi__extend_receive(j, t) : 0; + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc * dequant[0]); + + // decode AC components, see JPEG spec + k = 1; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) * dequant[zig]); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (rs != 0xf0) break; // end block + k += 16; + } else { + k += r; + // decode into unzigzag'd location + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]); + } + } + } while (k < 64); + return 1; +} + +static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b) +{ + int diff,dc; + int t; + if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + + if (j->succ_high == 0) { + // first scan for DC coefficient, must be first + memset(data,0,64*sizeof(data[0])); // 0 all the ac values now + t = stbi__jpeg_huff_decode(j, hdc); + diff = t ? stbi__extend_receive(j, t) : 0; + + dc = j->img_comp[b].dc_pred + diff; + j->img_comp[b].dc_pred = dc; + data[0] = (short) (dc << j->succ_low); + } else { + // refinement scan for DC coefficient + if (stbi__jpeg_get_bit(j)) + data[0] += (short) (1 << j->succ_low); + } + return 1; +} + +// @OPTIMIZE: store non-zigzagged during the decode passes, +// and only de-zigzag when dequantizing +static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac) +{ + int k; + if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); + + if (j->succ_high == 0) { + int shift = j->succ_low; + + if (j->eob_run) { + --j->eob_run; + return 1; + } + + k = j->spec_start; + do { + unsigned int zig; + int c,r,s; + if (j->code_bits < 16) stbi__grow_buffer_unsafe(j); + c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1); + r = fac[c]; + if (r) { // fast-AC path + k += (r >> 4) & 15; // run + s = r & 15; // combined length + j->code_buffer <<= s; + j->code_bits -= s; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) ((r >> 8) << shift); + } else { + int rs = stbi__jpeg_huff_decode(j, hac); + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r); + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + --j->eob_run; + break; + } + k += 16; + } else { + k += r; + zig = stbi__jpeg_dezigzag[k++]; + data[zig] = (short) (stbi__extend_receive(j,s) << shift); + } + } + } while (k <= j->spec_end); + } else { + // refinement scan for these AC coefficients + + short bit = (short) (1 << j->succ_low); + + if (j->eob_run) { + --j->eob_run; + for (k = j->spec_start; k <= j->spec_end; ++k) { + short *p = &data[stbi__jpeg_dezigzag[k]]; + if (*p != 0) + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } + } else { + k = j->spec_start; + do { + int r,s; + int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use 
the fast path here, advance-by-r is so slow, eh + if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG"); + s = rs & 15; + r = rs >> 4; + if (s == 0) { + if (r < 15) { + j->eob_run = (1 << r) - 1; + if (r) + j->eob_run += stbi__jpeg_get_bits(j, r); + r = 64; // force end of block + } else { + // r=15 s=0 should write 16 0s, so we just do + // a run of 15 0s and then write s (which is 0), + // so we don't have to do anything special here + } + } else { + if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG"); + // sign bit + if (stbi__jpeg_get_bit(j)) + s = bit; + else + s = -bit; + } + + // advance by r + while (k <= j->spec_end) { + short *p = &data[stbi__jpeg_dezigzag[k++]]; + if (*p != 0) { + if (stbi__jpeg_get_bit(j)) + if ((*p & bit)==0) { + if (*p > 0) + *p += bit; + else + *p -= bit; + } + } else { + if (r == 0) { + *p = (short) s; + break; + } + --r; + } + } + } while (k <= j->spec_end); + } + } + return 1; +} + +// take a -128..127 value and stbi__clamp it and convert to 0..255 +stbi_inline static stbi_uc stbi__clamp(int x) +{ + // trick to use a single test to catch both cases + if ((unsigned int) x > 255) { + if (x < 0) return 0; + if (x > 255) return 255; + } + return (stbi_uc) x; +} + +#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5))) +#define stbi__fsh(x) ((x) << 12) + +// derived from jidctint -- DCT_ISLOW +#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \ + int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \ + p2 = s2; \ + p3 = s6; \ + p1 = (p2+p3) * stbi__f2f(0.5411961f); \ + t2 = p1 + p3*stbi__f2f(-1.847759065f); \ + t3 = p1 + p2*stbi__f2f( 0.765366865f); \ + p2 = s0; \ + p3 = s4; \ + t0 = stbi__fsh(p2+p3); \ + t1 = stbi__fsh(p2-p3); \ + x0 = t0+t3; \ + x3 = t0-t3; \ + x1 = t1+t2; \ + x2 = t1-t2; \ + t0 = s7; \ + t1 = s5; \ + t2 = s3; \ + t3 = s1; \ + p3 = t0+t2; \ + p4 = t1+t3; \ + p1 = t0+t3; \ + p2 = t1+t2; \ + p5 = (p3+p4)*stbi__f2f( 1.175875602f); \ + t0 = t0*stbi__f2f( 0.298631336f); \ + t1 = t1*stbi__f2f( 2.053119869f); \ + t2 = t2*stbi__f2f( 3.072711026f); \ + t3 = t3*stbi__f2f( 1.501321110f); \ + p1 = p5 + p1*stbi__f2f(-0.899976223f); \ + p2 = p5 + p2*stbi__f2f(-2.562915447f); \ + p3 = p3*stbi__f2f(-1.961570560f); \ + p4 = p4*stbi__f2f(-0.390180644f); \ + t3 += p1+p4; \ + t2 += p2+p3; \ + t1 += p2+p4; \ + t0 += p1+p3; + +static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64]) +{ + int i,val[64],*v=val; + stbi_uc *o; + short *d = data; + + // columns + for (i=0; i < 8; ++i,++d, ++v) { + // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing + if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0 + && d[40]==0 && d[48]==0 && d[56]==0) { + // no shortcut 0 seconds + // (1|2|3|4|5|6|7)==0 0 seconds + // all separate -0.047 seconds + // 1 && 2|3 && 4|5 && 6|7: -0.047 seconds + int dcterm = d[0] << 2; + v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm; + } else { + STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56]) + // constants scaled things up by 1<<12; let's bring them back + // down, but keep 2 extra bits of precision + x0 += 512; x1 += 512; x2 += 512; x3 += 512; + v[ 0] = (x0+t3) >> 10; + v[56] = (x0-t3) >> 10; + v[ 8] = (x1+t2) >> 10; + v[48] = (x1-t2) >> 10; + v[16] = (x2+t1) >> 10; + v[40] = (x2-t1) >> 10; + v[24] = (x3+t0) >> 10; + v[32] = (x3-t0) >> 10; + } + } + + for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) { + // no fast case since the first 1D IDCT spread components out + STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]) + // constants scaled things up by 1<<12, plus 
we had 1<<2 from first + // loop, plus horizontal and vertical each scale by sqrt(8) so together + // we've got an extra 1<<3, so 1<<17 total we need to remove. + // so we want to round that, which means adding 0.5 * 1<<17, + // aka 65536. Also, we'll end up with -128 to 127 that we want + // to encode as 0..255 by adding 128, so we'll add that before the shift + x0 += 65536 + (128<<17); + x1 += 65536 + (128<<17); + x2 += 65536 + (128<<17); + x3 += 65536 + (128<<17); + // tried computing the shifts into temps, or'ing the temps to see + // if any were out of range, but that was slower + o[0] = stbi__clamp((x0+t3) >> 17); + o[7] = stbi__clamp((x0-t3) >> 17); + o[1] = stbi__clamp((x1+t2) >> 17); + o[6] = stbi__clamp((x1-t2) >> 17); + o[2] = stbi__clamp((x2+t1) >> 17); + o[5] = stbi__clamp((x2-t1) >> 17); + o[3] = stbi__clamp((x3+t0) >> 17); + o[4] = stbi__clamp((x3-t0) >> 17); + } +} + +#ifdef STBI_SSE2 +// sse2 integer IDCT. not the fastest possible implementation but it +// produces bit-identical results to the generic C version so it's +// fully "transparent". +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + // This is constructed to match our regular (generic) integer IDCT exactly. + __m128i row0, row1, row2, row3, row4, row5, row6, row7; + __m128i tmp; + + // dot product constant: even elems=x, odd elems=y + #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y)) + + // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit) + // out(1) = c1[even]*x + c1[odd]*y + #define dct_rot(out0,out1, x,y,c0,c1) \ + __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \ + __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \ + __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \ + __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \ + __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \ + __m128i out1##_h = _mm_madd_epi16(c0##hi, c1) + + // out = in << 12 (in 16-bit, out 32-bit) + #define dct_widen(out, in) \ + __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \ + __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4) + + // wide add + #define dct_wadd(out, a, b) \ + __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_add_epi32(a##_h, b##_h) + + // wide sub + #define dct_wsub(out, a, b) \ + __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \ + __m128i out##_h = _mm_sub_epi32(a##_h, b##_h) + + // butterfly a/b, add bias, then shift by "s" and pack + #define dct_bfly32o(out0, out1, a,b,bias,s) \ + { \ + __m128i abiased_l = _mm_add_epi32(a##_l, bias); \ + __m128i abiased_h = _mm_add_epi32(a##_h, bias); \ + dct_wadd(sum, abiased, b); \ + dct_wsub(dif, abiased, b); \ + out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \ + out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \ + } + + // 8-bit interleave step (for transposes) + #define dct_interleave8(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi8(a, b); \ + b = _mm_unpackhi_epi8(tmp, b) + + // 16-bit interleave step (for transposes) + #define dct_interleave16(a, b) \ + tmp = a; \ + a = _mm_unpacklo_epi16(a, b); \ + b = _mm_unpackhi_epi16(tmp, b) + + #define dct_pass(bias,shift) \ + { \ + /* even part */ \ + dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \ + __m128i sum04 = _mm_add_epi16(row0, row4); \ + __m128i dif04 = _mm_sub_epi16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, 
t2e); \ + /* odd part */ \ + dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \ + dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \ + __m128i sum17 = _mm_add_epi16(row1, row7); \ + __m128i sum35 = _mm_add_epi16(row3, row5); \ + dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \ + dct_wadd(x4, y0o, y4o); \ + dct_wadd(x5, y1o, y5o); \ + dct_wadd(x6, y2o, y5o); \ + dct_wadd(x7, y3o, y4o); \ + dct_bfly32o(row0,row7, x0,x7,bias,shift); \ + dct_bfly32o(row1,row6, x1,x6,bias,shift); \ + dct_bfly32o(row2,row5, x2,x5,bias,shift); \ + dct_bfly32o(row3,row4, x3,x4,bias,shift); \ + } + + __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f)); + __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f)); + __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f)); + __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f)); + __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f)); + __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f)); + __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f)); + __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f)); + + // rounding biases in column/row passes, see stbi__idct_block for explanation. + __m128i bias_0 = _mm_set1_epi32(512); + __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17)); + + // load + row0 = _mm_load_si128((const __m128i *) (data + 0*8)); + row1 = _mm_load_si128((const __m128i *) (data + 1*8)); + row2 = _mm_load_si128((const __m128i *) (data + 2*8)); + row3 = _mm_load_si128((const __m128i *) (data + 3*8)); + row4 = _mm_load_si128((const __m128i *) (data + 4*8)); + row5 = _mm_load_si128((const __m128i *) (data + 5*8)); + row6 = _mm_load_si128((const __m128i *) (data + 6*8)); + row7 = _mm_load_si128((const __m128i *) (data + 7*8)); + + // column pass + dct_pass(bias_0, 10); + + { + // 16bit 8x8 transpose pass 1 + dct_interleave16(row0, row4); + dct_interleave16(row1, row5); + dct_interleave16(row2, row6); + dct_interleave16(row3, row7); + + // transpose pass 2 + dct_interleave16(row0, row2); + dct_interleave16(row1, row3); + dct_interleave16(row4, row6); + dct_interleave16(row5, row7); + + // transpose pass 3 + dct_interleave16(row0, row1); + dct_interleave16(row2, row3); + dct_interleave16(row4, row5); + dct_interleave16(row6, row7); + } + + // row pass + dct_pass(bias_1, 17); + + { + // pack + __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7 + __m128i p1 = _mm_packus_epi16(row2, row3); + __m128i p2 = _mm_packus_epi16(row4, row5); + __m128i p3 = _mm_packus_epi16(row6, row7); + + // 8bit 8x8 transpose pass 1 + dct_interleave8(p0, p2); // a0e0a1e1... + dct_interleave8(p1, p3); // c0g0c1g1... + + // transpose pass 2 + dct_interleave8(p0, p1); // a0c0e0g0... + dct_interleave8(p2, p3); // b0d0f0h0... + + // transpose pass 3 + dct_interleave8(p0, p2); // a0b0c0d0... + dct_interleave8(p1, p3); // a4b4c4d4... 
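+ // note: _mm_shuffle_epi32(x, 0x4e) swaps the two 64-bit halves of a register, so each
+ // packed register below yields two output rows: the low half is stored directly and the
+ // high half is stored after the shuffle, both via 8-byte _mm_storel_epi64 stores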
+ + // store + _mm_storel_epi64((__m128i *) out, p0); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p2); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p1); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride; + _mm_storel_epi64((__m128i *) out, p3); out += out_stride; + _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e)); + } + +#undef dct_const +#undef dct_rot +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_interleave8 +#undef dct_interleave16 +#undef dct_pass +} + +#endif // STBI_SSE2 + +#ifdef STBI_NEON + +// NEON integer IDCT. should produce bit-identical +// results to the generic C version. +static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64]) +{ + int16x8_t row0, row1, row2, row3, row4, row5, row6, row7; + + int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f)); + int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f)); + int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f)); + int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f)); + int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f)); + int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f)); + int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f)); + int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f)); + int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f)); + int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f)); + int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f)); + int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f)); + +#define dct_long_mul(out, inq, coeff) \ + int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff) + +#define dct_long_mac(out, acc, inq, coeff) \ + int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \ + int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff) + +#define dct_widen(out, inq) \ + int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \ + int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12) + +// wide add +#define dct_wadd(out, a, b) \ + int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vaddq_s32(a##_h, b##_h) + +// wide sub +#define dct_wsub(out, a, b) \ + int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \ + int32x4_t out##_h = vsubq_s32(a##_h, b##_h) + +// butterfly a/b, then shift using "shiftop" by "s" and pack +#define dct_bfly32o(out0,out1, a,b,shiftop,s) \ + { \ + dct_wadd(sum, a, b); \ + dct_wsub(dif, a, b); \ + out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \ + out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \ + } + +#define dct_pass(shiftop, shift) \ + { \ + /* even part */ \ + int16x8_t sum26 = vaddq_s16(row2, row6); \ + dct_long_mul(p1e, sum26, rot0_0); \ + dct_long_mac(t2e, p1e, row6, rot0_1); \ + dct_long_mac(t3e, p1e, row2, rot0_2); \ + int16x8_t sum04 = vaddq_s16(row0, row4); \ + int16x8_t dif04 = vsubq_s16(row0, row4); \ + dct_widen(t0e, sum04); \ + dct_widen(t1e, dif04); \ + dct_wadd(x0, t0e, t3e); \ + dct_wsub(x3, t0e, t3e); \ + dct_wadd(x1, t1e, t2e); \ + dct_wsub(x2, t1e, t2e); \ + /* odd part */ \ + int16x8_t sum15 = vaddq_s16(row1, row5); \ + int16x8_t sum17 = vaddq_s16(row1, row7); \ + int16x8_t sum35 = vaddq_s16(row3, row5); \ + int16x8_t sum37 = vaddq_s16(row3, row7); \ + int16x8_t sumodd = vaddq_s16(sum17, sum35); \ + 
dct_long_mul(p5o, sumodd, rot1_0); \ + dct_long_mac(p1o, p5o, sum17, rot1_1); \ + dct_long_mac(p2o, p5o, sum35, rot1_2); \ + dct_long_mul(p3o, sum37, rot2_0); \ + dct_long_mul(p4o, sum15, rot2_1); \ + dct_wadd(sump13o, p1o, p3o); \ + dct_wadd(sump24o, p2o, p4o); \ + dct_wadd(sump23o, p2o, p3o); \ + dct_wadd(sump14o, p1o, p4o); \ + dct_long_mac(x4, sump13o, row7, rot3_0); \ + dct_long_mac(x5, sump24o, row5, rot3_1); \ + dct_long_mac(x6, sump23o, row3, rot3_2); \ + dct_long_mac(x7, sump14o, row1, rot3_3); \ + dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \ + dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \ + dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \ + dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \ + } + + // load + row0 = vld1q_s16(data + 0*8); + row1 = vld1q_s16(data + 1*8); + row2 = vld1q_s16(data + 2*8); + row3 = vld1q_s16(data + 3*8); + row4 = vld1q_s16(data + 4*8); + row5 = vld1q_s16(data + 5*8); + row6 = vld1q_s16(data + 6*8); + row7 = vld1q_s16(data + 7*8); + + // add DC bias + row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0)); + + // column pass + dct_pass(vrshrn_n_s32, 10); + + // 16bit 8x8 transpose + { +// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively. +// whether compilers actually get this is another story, sadly. +#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); } +#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); } + + // pass 1 + dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6 + dct_trn16(row2, row3); + dct_trn16(row4, row5); + dct_trn16(row6, row7); + + // pass 2 + dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4 + dct_trn32(row1, row3); + dct_trn32(row4, row6); + dct_trn32(row5, row7); + + // pass 3 + dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0 + dct_trn64(row1, row5); + dct_trn64(row2, row6); + dct_trn64(row3, row7); + +#undef dct_trn16 +#undef dct_trn32 +#undef dct_trn64 + } + + // row pass + // vrshrn_n_s32 only supports shifts up to 16, we need + // 17. so do a non-rounding shift of 16 first then follow + // up with a rounding shift by 1. + dct_pass(vshrn_n_s32, 16); + + { + // pack and round + uint8x8_t p0 = vqrshrun_n_s16(row0, 1); + uint8x8_t p1 = vqrshrun_n_s16(row1, 1); + uint8x8_t p2 = vqrshrun_n_s16(row2, 1); + uint8x8_t p3 = vqrshrun_n_s16(row3, 1); + uint8x8_t p4 = vqrshrun_n_s16(row4, 1); + uint8x8_t p5 = vqrshrun_n_s16(row5, 1); + uint8x8_t p6 = vqrshrun_n_s16(row6, 1); + uint8x8_t p7 = vqrshrun_n_s16(row7, 1); + + // again, these can translate into one instruction, but often don't. +#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; } +#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); } +#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); } + + // sadly can't use interleaved stores here since we only write + // 8 bytes to each scan line! 
+ + // 8x8 8-bit transpose pass 1 + dct_trn8_8(p0, p1); + dct_trn8_8(p2, p3); + dct_trn8_8(p4, p5); + dct_trn8_8(p6, p7); + + // pass 2 + dct_trn8_16(p0, p2); + dct_trn8_16(p1, p3); + dct_trn8_16(p4, p6); + dct_trn8_16(p5, p7); + + // pass 3 + dct_trn8_32(p0, p4); + dct_trn8_32(p1, p5); + dct_trn8_32(p2, p6); + dct_trn8_32(p3, p7); + + // store + vst1_u8(out, p0); out += out_stride; + vst1_u8(out, p1); out += out_stride; + vst1_u8(out, p2); out += out_stride; + vst1_u8(out, p3); out += out_stride; + vst1_u8(out, p4); out += out_stride; + vst1_u8(out, p5); out += out_stride; + vst1_u8(out, p6); out += out_stride; + vst1_u8(out, p7); + +#undef dct_trn8_8 +#undef dct_trn8_16 +#undef dct_trn8_32 + } + +#undef dct_long_mul +#undef dct_long_mac +#undef dct_widen +#undef dct_wadd +#undef dct_wsub +#undef dct_bfly32o +#undef dct_pass +} + +#endif // STBI_NEON + +#define STBI__MARKER_none 0xff +// if there's a pending marker from the entropy stream, return that +// otherwise, fetch from the stream and get a marker. if there's no +// marker, return 0xff, which is never a valid marker value +static stbi_uc stbi__get_marker(stbi__jpeg *j) +{ + stbi_uc x; + if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; } + x = stbi__get8(j->s); + if (x != 0xff) return STBI__MARKER_none; + while (x == 0xff) + x = stbi__get8(j->s); + return x; +} + +// in each scan, we'll have scan_n components, and the order +// of the components is specified by order[] +#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7) + +// after a restart interval, stbi__jpeg_reset the entropy decoder and +// the dc prediction +static void stbi__jpeg_reset(stbi__jpeg *j) +{ + j->code_bits = 0; + j->code_buffer = 0; + j->nomore = 0; + j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = 0; + j->marker = STBI__MARKER_none; + j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff; + j->eob_run = 0; + // no more than 1<<31 MCUs if no restart_interal? that's plenty safe, + // since we don't even allow 1<<30 pixels +} + +static int stbi__parse_entropy_coded_data(stbi__jpeg *z) +{ + stbi__jpeg_reset(z); + if (!z->progressive) { + if (z->scan_n == 1) { + int i,j; + STBI_SIMD_ALIGN(short, data[64]); + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + // if it's NOT a restart, then just bail, so we get corrupt data + // rather than no data + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + STBI_SIMD_ALIGN(short, data[64]); + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x)*8; + int y2 = (j*z->img_comp[n].v + y)*8; + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0; + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data); + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } else { + if (z->scan_n == 1) { + int i,j; + int n = z->order[0]; + // non-interleaved data, we just need to process one block at a time, + // in trivial scanline order + // number of blocks to do just depends on how many actual "pixels" this + // component has, independent of interleaved MCU blocking and such + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + if (z->spec_start == 0) { + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } else { + int ha = z->img_comp[n].ha; + if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha])) + return 0; + } + // every data block is an MCU, so countdown the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } else { // interleaved + int i,j,k,x,y; + for (j=0; j < z->img_mcu_y; ++j) { + for (i=0; i < z->img_mcu_x; ++i) { + // scan an interleaved mcu... 
process scan_n components in order + for (k=0; k < z->scan_n; ++k) { + int n = z->order[k]; + // scan out an mcu's worth of this component; that's just determined + // by the basic H and V specified for the component + for (y=0; y < z->img_comp[n].v; ++y) { + for (x=0; x < z->img_comp[n].h; ++x) { + int x2 = (i*z->img_comp[n].h + x); + int y2 = (j*z->img_comp[n].v + y); + short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w); + if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n)) + return 0; + } + } + } + // after all interleaved components, that's an interleaved MCU, + // so now count down the restart interval + if (--z->todo <= 0) { + if (z->code_bits < 24) stbi__grow_buffer_unsafe(z); + if (!STBI__RESTART(z->marker)) return 1; + stbi__jpeg_reset(z); + } + } + } + return 1; + } + } +} + +static void stbi__jpeg_dequantize(short *data, stbi_uc *dequant) +{ + int i; + for (i=0; i < 64; ++i) + data[i] *= dequant[i]; +} + +static void stbi__jpeg_finish(stbi__jpeg *z) +{ + if (z->progressive) { + // dequantize and idct the data + int i,j,n; + for (n=0; n < z->s->img_n; ++n) { + int w = (z->img_comp[n].x+7) >> 3; + int h = (z->img_comp[n].y+7) >> 3; + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) { + short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w); + stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]); + z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data); + } + } + } + } +} + +static int stbi__process_marker(stbi__jpeg *z, int m) +{ + int L; + switch (m) { + case STBI__MARKER_none: // no marker found + return stbi__err("expected marker","Corrupt JPEG"); + + case 0xDD: // DRI - specify restart interval + if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG"); + z->restart_interval = stbi__get16be(z->s); + return 1; + + case 0xDB: // DQT - define quantization table + L = stbi__get16be(z->s)-2; + while (L > 0) { + int q = stbi__get8(z->s); + int p = q >> 4; + int t = q & 15,i; + if (p != 0) return stbi__err("bad DQT type","Corrupt JPEG"); + if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG"); + for (i=0; i < 64; ++i) + z->dequant[t][stbi__jpeg_dezigzag[i]] = stbi__get8(z->s); + L -= 65; + } + return L==0; + + case 0xC4: // DHT - define huffman table + L = stbi__get16be(z->s)-2; + while (L > 0) { + stbi_uc *v; + int sizes[16],i,n=0; + int q = stbi__get8(z->s); + int tc = q >> 4; + int th = q & 15; + if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG"); + for (i=0; i < 16; ++i) { + sizes[i] = stbi__get8(z->s); + n += sizes[i]; + } + L -= 17; + if (tc == 0) { + if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; + v = z->huff_dc[th].values; + } else { + if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0; + v = z->huff_ac[th].values; + } + for (i=0; i < n; ++i) + v[i] = stbi__get8(z->s); + if (tc != 0) + stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th); + L -= n; + } + return L==0; + } + // check for comment block or APP blocks + if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) { + stbi__skip(z->s, stbi__get16be(z->s)-2); + return 1; + } + return 0; +} + +// after we see SOS +static int stbi__process_scan_header(stbi__jpeg *z) +{ + int i; + int Ls = stbi__get16be(z->s); + z->scan_n = stbi__get8(z->s); + if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG"); + if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG"); + for (i=0; 
i < z->scan_n; ++i) { + int id = stbi__get8(z->s), which; + int q = stbi__get8(z->s); + for (which = 0; which < z->s->img_n; ++which) + if (z->img_comp[which].id == id) + break; + if (which == z->s->img_n) return 0; // no match + z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG"); + z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG"); + z->order[i] = which; + } + + { + int aa; + z->spec_start = stbi__get8(z->s); + z->spec_end = stbi__get8(z->s); // should be 63, but might be 0 + aa = stbi__get8(z->s); + z->succ_high = (aa >> 4); + z->succ_low = (aa & 15); + if (z->progressive) { + if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13) + return stbi__err("bad SOS", "Corrupt JPEG"); + } else { + if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG"); + if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG"); + z->spec_end = 63; + } + } + + return 1; +} + +static int stbi__process_frame_header(stbi__jpeg *z, int scan) +{ + stbi__context *s = z->s; + int Lf,p,i,q, h_max=1,v_max=1,c; + Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG + p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline + s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG + s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires + c = stbi__get8(s); + if (c != 3 && c != 1) return stbi__err("bad component count","Corrupt JPEG"); // JFIF requires + s->img_n = c; + for (i=0; i < c; ++i) { + z->img_comp[i].data = NULL; + z->img_comp[i].linebuf = NULL; + } + + if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG"); + + for (i=0; i < s->img_n; ++i) { + z->img_comp[i].id = stbi__get8(s); + if (z->img_comp[i].id != i+1) // JFIF requires + if (z->img_comp[i].id != i) // some version of jpegtran outputs non-JFIF-compliant files! + return stbi__err("bad component ID","Corrupt JPEG"); + q = stbi__get8(s); + z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG"); + z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG"); + z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG"); + } + + if (scan != STBI__SCAN_load) return 1; + + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + + for (i=0; i < s->img_n; ++i) { + if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h; + if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v; + } + + // compute interleaved mcu info + z->img_h_max = h_max; + z->img_v_max = v_max; + z->img_mcu_w = h_max * 8; + z->img_mcu_h = v_max * 8; + z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w; + z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h; + + for (i=0; i < s->img_n; ++i) { + // number of effective pixels (e.g. 
for non-interleaved MCU) + z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max; + z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max; + // to simplify generation, we'll allocate enough memory to decode + // the bogus oversized data from using interleaved MCUs and their + // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't + // discard the extra data until colorspace conversion + z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8; + z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8; + z->img_comp[i].raw_data = stbi__malloc(z->img_comp[i].w2 * z->img_comp[i].h2+15); + + if (z->img_comp[i].raw_data == NULL) { + for(--i; i >= 0; --i) { + STBI_FREE(z->img_comp[i].raw_data); + z->img_comp[i].raw_data = NULL; + } + return stbi__err("outofmem", "Out of memory"); + } + // align blocks for idct using mmx/sse + z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15); + z->img_comp[i].linebuf = NULL; + if (z->progressive) { + z->img_comp[i].coeff_w = (z->img_comp[i].w2 + 7) >> 3; + z->img_comp[i].coeff_h = (z->img_comp[i].h2 + 7) >> 3; + z->img_comp[i].raw_coeff = STBI_MALLOC(z->img_comp[i].coeff_w * z->img_comp[i].coeff_h * 64 * sizeof(short) + 15); + z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15); + } else { + z->img_comp[i].coeff = 0; + z->img_comp[i].raw_coeff = 0; + } + } + + return 1; +} + +// use comparisons since in some cases we handle more than one case (e.g. SOF) +#define stbi__DNL(x) ((x) == 0xdc) +#define stbi__SOI(x) ((x) == 0xd8) +#define stbi__EOI(x) ((x) == 0xd9) +#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2) +#define stbi__SOS(x) ((x) == 0xda) + +#define stbi__SOF_progressive(x) ((x) == 0xc2) + +static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) +{ + int m; + z->marker = STBI__MARKER_none; // initialize cached marker to empty + m = stbi__get_marker(z); + if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG"); + if (scan == STBI__SCAN_type) return 1; + m = stbi__get_marker(z); + while (!stbi__SOF(m)) { + if (!stbi__process_marker(z,m)) return 0; + m = stbi__get_marker(z); + while (m == STBI__MARKER_none) { + // some files have extra padding after their blocks, so ok, we'll scan + if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG"); + m = stbi__get_marker(z); + } + } + z->progressive = stbi__SOF_progressive(m); + if (!stbi__process_frame_header(z, scan)) return 0; + return 1; +} + +// decode image to YCbCr format +static int stbi__decode_jpeg_image(stbi__jpeg *j) +{ + int m; + for (m = 0; m < 4; m++) { + j->img_comp[m].raw_data = NULL; + j->img_comp[m].raw_coeff = NULL; + } + j->restart_interval = 0; + if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0; + m = stbi__get_marker(j); + while (!stbi__EOI(m)) { + if (stbi__SOS(m)) { + if (!stbi__process_scan_header(j)) return 0; + if (!stbi__parse_entropy_coded_data(j)) return 0; + if (j->marker == STBI__MARKER_none ) { + // handle 0s at the end of image data from IP Kamera 9060 + while (!stbi__at_eof(j->s)) { + int x = stbi__get8(j->s); + if (x == 255) { + j->marker = stbi__get8(j->s); + break; + } else if (x != 0) { + return stbi__err("junk before marker", "Corrupt JPEG"); + } + } + // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 + } + } else { + if (!stbi__process_marker(j, m)) return 0; + } + m = stbi__get_marker(j); + } + if (j->progressive) + stbi__jpeg_finish(j); + return 1; +} + +// static 
jfif-centered resampling (across block boundaries) + +typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1, + int w, int hs); + +#define stbi__div4(x) ((stbi_uc) ((x) >> 2)) + +static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + STBI_NOTUSED(out); + STBI_NOTUSED(in_far); + STBI_NOTUSED(w); + STBI_NOTUSED(hs); + return in_near; +} + +static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples vertically for every one in input + int i; + STBI_NOTUSED(hs); + for (i=0; i < w; ++i) + out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2); + return out; +} + +static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate two samples horizontally for every one in input + int i; + stbi_uc *input = in_near; + + if (w == 1) { + // if only one sample, can't do any interpolation + out[0] = out[1] = input[0]; + return out; + } + + out[0] = input[0]; + out[1] = stbi__div4(input[0]*3 + input[1] + 2); + for (i=1; i < w-1; ++i) { + int n = 3*input[i]+2; + out[i*2+0] = stbi__div4(n+input[i-1]); + out[i*2+1] = stbi__div4(n+input[i+1]); + } + out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2); + out[i*2+1] = input[w-1]; + + STBI_NOTUSED(in_far); + STBI_NOTUSED(hs); + + return out; +} + +#define stbi__div16(x) ((stbi_uc) ((x) >> 4)) + +static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i,t0,t1; + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + out[0] = stbi__div4(t1+2); + for (i=1; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // need to generate 2x2 samples for every one in input + int i=0,t0,t1; + + if (w == 1) { + out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2); + return out; + } + + t1 = 3*in_near[0] + in_far[0]; + // process groups of 8 pixels for as long as we can. + // note we can't handle the last pixel in a row in this loop + // because we need to handle the filter boundary conditions. + for (; i < ((w-1) & ~7); i += 8) { +#if defined(STBI_SSE2) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + __m128i zero = _mm_setzero_si128(); + __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i)); + __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i)); + __m128i farw = _mm_unpacklo_epi8(farb, zero); + __m128i nearw = _mm_unpacklo_epi8(nearb, zero); + __m128i diff = _mm_sub_epi16(farw, nearw); + __m128i nears = _mm_slli_epi16(nearw, 2); + __m128i curr = _mm_add_epi16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. 
+ __m128i prv0 = _mm_slli_si128(curr, 2); + __m128i nxt0 = _mm_srli_si128(curr, 2); + __m128i prev = _mm_insert_epi16(prv0, t1, 0); + __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. + __m128i bias = _mm_set1_epi16(8); + __m128i curs = _mm_slli_epi16(curr, 2); + __m128i prvd = _mm_sub_epi16(prev, curr); + __m128i nxtd = _mm_sub_epi16(next, curr); + __m128i curb = _mm_add_epi16(curs, bias); + __m128i even = _mm_add_epi16(prvd, curb); + __m128i odd = _mm_add_epi16(nxtd, curb); + + // interleave even and odd pixels, then undo scaling. + __m128i int0 = _mm_unpacklo_epi16(even, odd); + __m128i int1 = _mm_unpackhi_epi16(even, odd); + __m128i de0 = _mm_srli_epi16(int0, 4); + __m128i de1 = _mm_srli_epi16(int1, 4); + + // pack and write output + __m128i outv = _mm_packus_epi16(de0, de1); + _mm_storeu_si128((__m128i *) (out + i*2), outv); +#elif defined(STBI_NEON) + // load and perform the vertical filtering pass + // this uses 3*x + y = 4*x + (y - x) + uint8x8_t farb = vld1_u8(in_far + i); + uint8x8_t nearb = vld1_u8(in_near + i); + int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb)); + int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2)); + int16x8_t curr = vaddq_s16(nears, diff); // current row + + // horizontal filter works the same based on shifted vers of current + // row. "prev" is current row shifted right by 1 pixel; we need to + // insert the previous pixel value (from t1). + // "next" is current row shifted left by 1 pixel, with first pixel + // of next block of 8 pixels added in. + int16x8_t prv0 = vextq_s16(curr, curr, 7); + int16x8_t nxt0 = vextq_s16(curr, curr, 1); + int16x8_t prev = vsetq_lane_s16(t1, prv0, 0); + int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7); + + // horizontal filter, polyphase implementation since it's convenient: + // even pixels = 3*cur + prev = cur*4 + (prev - cur) + // odd pixels = 3*cur + next = cur*4 + (next - cur) + // note the shared term. 
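The 2x2 upsampler here is a separable triangle filter: each output is a 3:1 blend of the two vertically filtered taps, and the SIMD paths rewrite 3*x + y as 4*x + (y - x) so the multiply becomes a shift plus a subtract. A minimal scalar sketch of that weighting, using made-up chroma columns and an illustrative helper (blend) that is not part of stb_image:

#include <stdio.h>

/* 3:1 blend of two vertically filtered taps (each tap is 3*near + far),
   +8 for rounding, then /16 -- the same weighting the hv_2 paths use. */
static unsigned char blend(int t_heavy, int t_light)
{
    return (unsigned char)((3 * t_heavy + t_light + 8) >> 4);
}

int main(void)
{
    int near0 = 100, far0 = 80, near1 = 120, far1 = 140;  /* two adjacent chroma columns */
    int t0 = 3 * near0 + far0;   /* vertical pass, weighted toward the nearer row */
    int t1 = 3 * near1 + far1;
    printf("left=%u right=%u\n", blend(t0, t1), blend(t1, t0));
    return 0;
}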
+ int16x8_t curs = vshlq_n_s16(curr, 2); + int16x8_t prvd = vsubq_s16(prev, curr); + int16x8_t nxtd = vsubq_s16(next, curr); + int16x8_t even = vaddq_s16(curs, prvd); + int16x8_t odd = vaddq_s16(curs, nxtd); + + // undo scaling and round, then store with even/odd phases interleaved + uint8x8x2_t o; + o.val[0] = vqrshrun_n_s16(even, 4); + o.val[1] = vqrshrun_n_s16(odd, 4); + vst2_u8(out + i*2, o); +#endif + + // "previous" value for next iter + t1 = 3*in_near[i+7] + in_far[i+7]; + } + + t0 = t1; + t1 = 3*in_near[i] + in_far[i]; + out[i*2] = stbi__div16(3*t1 + t0 + 8); + + for (++i; i < w; ++i) { + t0 = t1; + t1 = 3*in_near[i]+in_far[i]; + out[i*2-1] = stbi__div16(3*t0 + t1 + 8); + out[i*2 ] = stbi__div16(3*t1 + t0 + 8); + } + out[w*2-1] = stbi__div4(t1+2); + + STBI_NOTUSED(hs); + + return out; +} +#endif + +static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs) +{ + // resample with nearest-neighbor + int i,j; + STBI_NOTUSED(in_far); + for (i=0; i < w; ++i) + for (j=0; j < hs; ++j) + out[i*hs+j] = in_near[i]; + return out; +} + +#ifdef STBI_JPEG_OLD +// this is the same YCbCr-to-RGB calculation that stb_image has used +// historically before the algorithm changes in 1.49 +#define float2fixed(x) ((int) ((x) * 65536 + 0.5)) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 16) + 32768; // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr*float2fixed(1.40200f); + g = y_fixed - cr*float2fixed(0.71414f) - cb*float2fixed(0.34414f); + b = y_fixed + cb*float2fixed(1.77200f); + r >>= 16; + g >>= 16; + b >>= 16; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#else +// this is a reduced-precision calculation of YCbCr-to-RGB introduced +// to make sure the code produces the same results in both SIMD and scalar +#define float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8) +static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step) +{ + int i; + for (i=0; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* float2fixed(1.40200f); + g = y_fixed + (cr*-float2fixed(0.71414f)) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +#if defined(STBI_SSE2) || defined(STBI_NEON) +static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step) +{ + int i = 0; + +#ifdef STBI_SSE2 + // step == 3 is pretty ugly on the final interleave, and i'm not convinced + // it's useful in practice (you wouldn't use it for textures, for example). + // so just accelerate step == 4 case. 
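The transform being vectorized here is the same 20-bit fixed-point math as the scalar row above, with constants rounded to 12 fractional bits so both paths round identically. A reduced one-pixel sketch (fx and clamp8 are illustrative helpers, the Y/Cb/Cr values are made up, and it skips the & 0xffff0000 truncation the scalar g term applies to mirror SIMD behaviour):

#include <stdio.h>

static int fx(float x) { return ((int)(x * 4096.0f + 0.5f)) << 8; }  /* 12-bit constant, 20-bit result */
static unsigned char clamp8(int v) { return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v); }

int main(void)
{
    int Y = 128, Cb = 90, Cr = 200;                 /* sample pixel */
    int yf = (Y << 20) + (1 << 19);                 /* + 1/2 in fixed point for rounding */
    int cr = Cr - 128, cb = Cb - 128;
    int r = clamp8((yf + cr * fx(1.40200f)) >> 20);
    int g = clamp8((yf - cr * fx(0.71414f) - cb * fx(0.34414f)) >> 20);
    int b = clamp8((yf + cb * fx(1.77200f)) >> 20);
    printf("rgb = %d %d %d\n", r, g, b);
    return 0;
}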
+ if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. + __m128i signflip = _mm_set1_epi8(-0x80); + __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f)); + __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f)); + __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f)); + __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f)); + __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128); + __m128i xw = _mm_set1_epi16(255); // alpha channel + + for (; i+7 < count; i += 8) { + // load + __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i)); + __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i)); + __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i)); + __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128 + __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128 + + // unpack to short (and left-shift cr, cb by 8) + __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes); + __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased); + __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased); + + // color transform + __m128i yws = _mm_srli_epi16(yw, 4); + __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw); + __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw); + __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1); + __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1); + __m128i rws = _mm_add_epi16(cr0, yws); + __m128i gwt = _mm_add_epi16(cb0, yws); + __m128i bws = _mm_add_epi16(yws, cb1); + __m128i gws = _mm_add_epi16(gwt, cr1); + + // descale + __m128i rw = _mm_srai_epi16(rws, 4); + __m128i bw = _mm_srai_epi16(bws, 4); + __m128i gw = _mm_srai_epi16(gws, 4); + + // back to byte, set up for transpose + __m128i brb = _mm_packus_epi16(rw, bw); + __m128i gxb = _mm_packus_epi16(gw, xw); + + // transpose to interleave channels + __m128i t0 = _mm_unpacklo_epi8(brb, gxb); + __m128i t1 = _mm_unpackhi_epi8(brb, gxb); + __m128i o0 = _mm_unpacklo_epi16(t0, t1); + __m128i o1 = _mm_unpackhi_epi16(t0, t1); + + // store + _mm_storeu_si128((__m128i *) (out + 0), o0); + _mm_storeu_si128((__m128i *) (out + 16), o1); + out += 32; + } + } +#endif + +#ifdef STBI_NEON + // in this version, step=3 support would be easy to add. but is there demand? + if (step == 4) { + // this is a fairly straightforward implementation and not super-optimized. 
+ uint8x8_t signflip = vdup_n_u8(0x80); + int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f)); + int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f)); + int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f)); + int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f)); + + for (; i+7 < count; i += 8) { + // load + uint8x8_t y_bytes = vld1_u8(y + i); + uint8x8_t cr_bytes = vld1_u8(pcr + i); + uint8x8_t cb_bytes = vld1_u8(pcb + i); + int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip)); + int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip)); + + // expand to s16 + int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4)); + int16x8_t crw = vshll_n_s8(cr_biased, 7); + int16x8_t cbw = vshll_n_s8(cb_biased, 7); + + // color transform + int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0); + int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0); + int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1); + int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1); + int16x8_t rws = vaddq_s16(yws, cr0); + int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1); + int16x8_t bws = vaddq_s16(yws, cb1); + + // undo scaling, round, convert to byte + uint8x8x4_t o; + o.val[0] = vqrshrun_n_s16(rws, 4); + o.val[1] = vqrshrun_n_s16(gws, 4); + o.val[2] = vqrshrun_n_s16(bws, 4); + o.val[3] = vdup_n_u8(255); + + // store, interleaving r/g/b/a + vst4_u8(out, o); + out += 8*4; + } + } +#endif + + for (; i < count; ++i) { + int y_fixed = (y[i] << 20) + (1<<19); // rounding + int r,g,b; + int cr = pcr[i] - 128; + int cb = pcb[i] - 128; + r = y_fixed + cr* float2fixed(1.40200f); + g = y_fixed + cr*-float2fixed(0.71414f) + ((cb*-float2fixed(0.34414f)) & 0xffff0000); + b = y_fixed + cb* float2fixed(1.77200f); + r >>= 20; + g >>= 20; + b >>= 20; + if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; } + if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; } + if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; } + out[0] = (stbi_uc)r; + out[1] = (stbi_uc)g; + out[2] = (stbi_uc)b; + out[3] = 255; + out += step; + } +} +#endif + +// set up the kernels +static void stbi__setup_jpeg(stbi__jpeg *j) +{ + j->idct_block_kernel = stbi__idct_block; + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row; + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2; + +#ifdef STBI_SSE2 + if (stbi__sse2_available()) { + j->idct_block_kernel = stbi__idct_simd; + #ifndef STBI_JPEG_OLD + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + #endif + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; + } +#endif + +#ifdef STBI_NEON + j->idct_block_kernel = stbi__idct_simd; + #ifndef STBI_JPEG_OLD + j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd; + #endif + j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd; +#endif +} + +// clean up the temporary component buffers +static void stbi__cleanup_jpeg(stbi__jpeg *j) +{ + int i; + for (i=0; i < j->s->img_n; ++i) { + if (j->img_comp[i].raw_data) { + STBI_FREE(j->img_comp[i].raw_data); + j->img_comp[i].raw_data = NULL; + j->img_comp[i].data = NULL; + } + if (j->img_comp[i].raw_coeff) { + STBI_FREE(j->img_comp[i].raw_coeff); + j->img_comp[i].raw_coeff = 0; + j->img_comp[i].coeff = 0; + } + if (j->img_comp[i].linebuf) { + STBI_FREE(j->img_comp[i].linebuf); + j->img_comp[i].linebuf = NULL; + } + } +} + +typedef struct +{ + resample_row_func resample; + stbi_uc *line0,*line1; + int hs,vs; // expansion factor in each axis + int w_lores; // horizontal pixels pre-expansion + int ystep; // how far through vertical expansion 
we are + int ypos; // which pre-expansion row we're on +} stbi__resample; + +static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp) +{ + int n, decode_n; + z->s->img_n = 0; // make stbi__cleanup_jpeg safe + + // validate req_comp + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + + // load a jpeg image from whichever source, but leave in YCbCr format + if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; } + + // determine actual number of components to generate + n = req_comp ? req_comp : z->s->img_n; + + if (z->s->img_n == 3 && n < 3) + decode_n = 1; + else + decode_n = z->s->img_n; + + // resample and color-convert + { + int k; + unsigned int i,j; + stbi_uc *output; + stbi_uc *coutput[4]; + + stbi__resample res_comp[4]; + + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + + // allocate line buffer big enough for upsampling off the edges + // with upsample factor of 4 + z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3); + if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + r->hs = z->img_h_max / z->img_comp[k].h; + r->vs = z->img_v_max / z->img_comp[k].v; + r->ystep = r->vs >> 1; + r->w_lores = (z->s->img_x + r->hs-1) / r->hs; + r->ypos = 0; + r->line0 = r->line1 = z->img_comp[k].data; + + if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1; + else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2; + else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2; + else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel; + else r->resample = stbi__resample_row_generic; + } + + // can't error after this so, this is safe + output = (stbi_uc *) stbi__malloc(n * z->s->img_x * z->s->img_y + 1); + if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); } + + // now go ahead and resample + for (j=0; j < z->s->img_y; ++j) { + stbi_uc *out = output + n * z->s->img_x * j; + for (k=0; k < decode_n; ++k) { + stbi__resample *r = &res_comp[k]; + int y_bot = r->ystep >= (r->vs >> 1); + coutput[k] = r->resample(z->img_comp[k].linebuf, + y_bot ? r->line1 : r->line0, + y_bot ? 
r->line0 : r->line1, + r->w_lores, r->hs); + if (++r->ystep >= r->vs) { + r->ystep = 0; + r->line0 = r->line1; + if (++r->ypos < z->img_comp[k].y) + r->line1 += z->img_comp[k].w2; + } + } + if (n >= 3) { + stbi_uc *y = coutput[0]; + if (z->s->img_n == 3) { + z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n); + } else + for (i=0; i < z->s->img_x; ++i) { + out[0] = out[1] = out[2] = y[i]; + out[3] = 255; // not used if n==3 + out += n; + } + } else { + stbi_uc *y = coutput[0]; + if (n == 1) + for (i=0; i < z->s->img_x; ++i) out[i] = y[i]; + else + for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255; + } + } + stbi__cleanup_jpeg(z); + *out_x = z->s->img_x; + *out_y = z->s->img_y; + if (comp) *comp = z->s->img_n; // report original components, not output + return output; + } +} + +static unsigned char *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__jpeg j; + j.s = s; + stbi__setup_jpeg(&j); + return load_jpeg_image(&j, x,y,comp,req_comp); +} + +static int stbi__jpeg_test(stbi__context *s) +{ + int r; + stbi__jpeg j; + j.s = s; + stbi__setup_jpeg(&j); + r = stbi__decode_jpeg_header(&j, STBI__SCAN_type); + stbi__rewind(s); + return r; +} + +static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp) +{ + if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) { + stbi__rewind( j->s ); + return 0; + } + if (x) *x = j->s->img_x; + if (y) *y = j->s->img_y; + if (comp) *comp = j->s->img_n; + return 1; +} + +static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__jpeg j; + j.s = s; + return stbi__jpeg_info_raw(&j, x, y, comp); +} +#endif + +// public domain zlib decode v0.2 Sean Barrett 2006-11-18 +// simple implementation +// - all input must be provided in an upfront buffer +// - all output is written to a single output buffer (can malloc/realloc) +// performance +// - fast huffman + +#ifndef STBI_NO_ZLIB + +// fast-way is faster to check than jpeg huffman, but slow way is slower +#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables +#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1) + +// zlib-style huffman encoding +// (jpegs packs from left, zlib from right, so can't share code) +typedef struct +{ + stbi__uint16 fast[1 << STBI__ZFAST_BITS]; + stbi__uint16 firstcode[16]; + int maxcode[17]; + stbi__uint16 firstsymbol[16]; + stbi_uc size[288]; + stbi__uint16 value[288]; +} stbi__zhuffman; + +stbi_inline static int stbi__bitreverse16(int n) +{ + n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1); + n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2); + n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4); + n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8); + return n; +} + +stbi_inline static int stbi__bit_reverse(int v, int bits) +{ + STBI_ASSERT(bits <= 16); + // to bit reverse n bits, reverse 16 and shift + // e.g. 
11 bits, bit reverse and shift away 5 + return stbi__bitreverse16(v) >> (16-bits); +} + +static int stbi__zbuild_huffman(stbi__zhuffman *z, stbi_uc *sizelist, int num) +{ + int i,k=0; + int code, next_code[16], sizes[17]; + + // DEFLATE spec for generating codes + memset(sizes, 0, sizeof(sizes)); + memset(z->fast, 0, sizeof(z->fast)); + for (i=0; i < num; ++i) + ++sizes[sizelist[i]]; + sizes[0] = 0; + for (i=1; i < 16; ++i) + if (sizes[i] > (1 << i)) + return stbi__err("bad sizes", "Corrupt PNG"); + code = 0; + for (i=1; i < 16; ++i) { + next_code[i] = code; + z->firstcode[i] = (stbi__uint16) code; + z->firstsymbol[i] = (stbi__uint16) k; + code = (code + sizes[i]); + if (sizes[i]) + if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG"); + z->maxcode[i] = code << (16-i); // preshift for inner loop + code <<= 1; + k += sizes[i]; + } + z->maxcode[16] = 0x10000; // sentinel + for (i=0; i < num; ++i) { + int s = sizelist[i]; + if (s) { + int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s]; + stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i); + z->size [c] = (stbi_uc ) s; + z->value[c] = (stbi__uint16) i; + if (s <= STBI__ZFAST_BITS) { + int j = stbi__bit_reverse(next_code[s],s); + while (j < (1 << STBI__ZFAST_BITS)) { + z->fast[j] = fastv; + j += (1 << s); + } + } + ++next_code[s]; + } + } + return 1; +} + +// zlib-from-memory implementation for PNG reading +// because PNG allows splitting the zlib stream arbitrarily, +// and it's annoying structurally to have PNG call ZLIB call PNG, +// we require PNG read all the IDATs and combine them into a single +// memory buffer + +typedef struct +{ + stbi_uc *zbuffer, *zbuffer_end; + int num_bits; + stbi__uint32 code_buffer; + + char *zout; + char *zout_start; + char *zout_end; + int z_expandable; + + stbi__zhuffman z_length, z_distance; +} stbi__zbuf; + +stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z) +{ + if (z->zbuffer >= z->zbuffer_end) return 0; + return *z->zbuffer++; +} + +static void stbi__fill_bits(stbi__zbuf *z) +{ + do { + STBI_ASSERT(z->code_buffer < (1U << z->num_bits)); + z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits; + z->num_bits += 8; + } while (z->num_bits <= 24); +} + +stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n) +{ + unsigned int k; + if (z->num_bits < n) stbi__fill_bits(z); + k = z->code_buffer & ((1 << n) - 1); + z->code_buffer >>= n; + z->num_bits -= n; + return k; +} + +static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s,k; + // not resolved by fast table, so compute it the slow way + // use jpeg approach, which requires MSbits at top + k = stbi__bit_reverse(a->code_buffer, 16); + for (s=STBI__ZFAST_BITS+1; ; ++s) + if (k < z->maxcode[s]) + break; + if (s == 16) return -1; // invalid code! 
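stbi__zbuild_huffman assigns canonical codes the way RFC 1951 section 3.2.2 describes: count the symbols of each code length, derive the first code of each length from where the previous length left off, then hand out consecutive codes in symbol order. A minimal sketch of that rule for a made-up four-symbol alphabet with code lengths 2, 1, 3, 3:

#include <stdio.h>

int main(void)
{
    int lens[4] = { 2, 1, 3, 3 };
    int count[16] = { 0 }, next_code[16] = { 0 };
    int i, code = 0;

    for (i = 0; i < 4; ++i) count[lens[i]]++;
    for (i = 1; i < 16; ++i) {               /* first code of each length */
        code = (code + count[i - 1]) << 1;
        next_code[i] = code;
    }
    for (i = 0; i < 4; ++i)                  /* assign consecutive codes in symbol order */
        printf("symbol %d -> len %d code %d\n", i, lens[i], next_code[lens[i]]++);
    return 0;
}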
+ // code size is s, so: + b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s]; + STBI_ASSERT(z->size[b] == s); + a->code_buffer >>= s; + a->num_bits -= s; + return z->value[b]; +} + +stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) +{ + int b,s; + if (a->num_bits < 16) stbi__fill_bits(a); + b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; + if (b) { + s = b >> 9; + a->code_buffer >>= s; + a->num_bits -= s; + return b & 511; + } + return stbi__zhuffman_decode_slowpath(a, z); +} + +static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes +{ + char *q; + int cur, limit, old_limit; + z->zout = zout; + if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG"); + cur = (int) (z->zout - z->zout_start); + limit = old_limit = (int) (z->zout_end - z->zout_start); + while (cur + n > limit) + limit *= 2; + q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit); + STBI_NOTUSED(old_limit); + if (q == NULL) return stbi__err("outofmem", "Out of memory"); + z->zout_start = q; + z->zout = q + cur; + z->zout_end = q + limit; + return 1; +} + +static int stbi__zlength_base[31] = { + 3,4,5,6,7,8,9,10,11,13, + 15,17,19,23,27,31,35,43,51,59, + 67,83,99,115,131,163,195,227,258,0,0 }; + +static int stbi__zlength_extra[31]= +{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 }; + +static int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193, +257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0}; + +static int stbi__zdist_extra[32] = +{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; + +static int stbi__parse_huffman_block(stbi__zbuf *a) +{ + char *zout = a->zout; + for(;;) { + int z = stbi__zhuffman_decode(a, &a->z_length); + if (z < 256) { + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes + if (zout >= a->zout_end) { + if (!stbi__zexpand(a, zout, 1)) return 0; + zout = a->zout; + } + *zout++ = (char) z; + } else { + stbi_uc *p; + int len,dist; + if (z == 256) { + a->zout = zout; + return 1; + } + z -= 257; + len = stbi__zlength_base[z]; + if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); + z = stbi__zhuffman_decode(a, &a->z_distance); + if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + dist = stbi__zdist_base[z]; + if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); + if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); + if (zout + len > a->zout_end) { + if (!stbi__zexpand(a, zout, len)) return 0; + zout = a->zout; + } + p = (stbi_uc *) (zout - dist); + if (dist == 1) { // run of one byte; common in images. 
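A match symbol in 257..285 decodes to a copy length via a base value plus a few extra bits, and the following distance symbol works the same way against the distance tables above. A small sketch of that lookup with made-up symbols and extra-bit values (the truncated tables mirror stbi__zlength_base/stbi__zdist_base):

#include <stdio.h>

static const int len_base[]   = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43 };
static const int len_extra[]  = { 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3 };
static const int dist_base[]  = { 1,2,3,4,5,7,9,13,17,25,33 };
static const int dist_extra[] = { 0,0,0,0,1,1,2,2,3,3,4 };

int main(void)
{
    int len_symbol = 273, extra_len = 2;   /* value read from the 3 extra length bits */
    int dist_symbol = 10, extra_dist = 1;  /* value read from the 4 extra distance bits */
    int length   = len_base[len_symbol - 257] + extra_len;   /* 35 + 2 = 37 */
    int distance = dist_base[dist_symbol]     + extra_dist;  /* 33 + 1 = 34 */
    printf("copy %d bytes from %d bytes back (extra bits: %d len, %d dist)\n",
           length, distance, len_extra[len_symbol - 257], dist_extra[dist_symbol]);
    return 0;
}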
+ stbi_uc v = *p; + if (len) { do *zout++ = v; while (--len); } + } else { + if (len) { do *zout++ = *p++; while (--len); } + } + } + } +} + +static int stbi__compute_huffman_codes(stbi__zbuf *a) +{ + static stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 }; + stbi__zhuffman z_codelength; + stbi_uc lencodes[286+32+137];//padding for maximum single op + stbi_uc codelength_sizes[19]; + int i,n; + + int hlit = stbi__zreceive(a,5) + 257; + int hdist = stbi__zreceive(a,5) + 1; + int hclen = stbi__zreceive(a,4) + 4; + + memset(codelength_sizes, 0, sizeof(codelength_sizes)); + for (i=0; i < hclen; ++i) { + int s = stbi__zreceive(a,3); + codelength_sizes[length_dezigzag[i]] = (stbi_uc) s; + } + if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0; + + n = 0; + while (n < hlit + hdist) { + int c = stbi__zhuffman_decode(a, &z_codelength); + if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG"); + if (c < 16) + lencodes[n++] = (stbi_uc) c; + else if (c == 16) { + c = stbi__zreceive(a,2)+3; + memset(lencodes+n, lencodes[n-1], c); + n += c; + } else if (c == 17) { + c = stbi__zreceive(a,3)+3; + memset(lencodes+n, 0, c); + n += c; + } else { + STBI_ASSERT(c == 18); + c = stbi__zreceive(a,7)+11; + memset(lencodes+n, 0, c); + n += c; + } + } + if (n != hlit+hdist) return stbi__err("bad codelengths","Corrupt PNG"); + if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0; + return 1; +} + +static int stbi__parse_uncomperssed_block(stbi__zbuf *a) +{ + stbi_uc header[4]; + int len,nlen,k; + if (a->num_bits & 7) + stbi__zreceive(a, a->num_bits & 7); // discard + // drain the bit-packed data into header + k = 0; + while (a->num_bits > 0) { + header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check + a->code_buffer >>= 8; + a->num_bits -= 8; + } + STBI_ASSERT(a->num_bits == 0); + // now fill header the normal way + while (k < 4) + header[k++] = stbi__zget8(a); + len = header[1] * 256 + header[0]; + nlen = header[3] * 256 + header[2]; + if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG"); + if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG"); + if (a->zout + len > a->zout_end) + if (!stbi__zexpand(a, a->zout, len)) return 0; + memcpy(a->zout, a->zbuffer, len); + a->zbuffer += len; + a->zout += len; + return 1; +} + +static int stbi__parse_zlib_header(stbi__zbuf *a) +{ + int cmf = stbi__zget8(a); + int cm = cmf & 15; + /* int cinfo = cmf >> 4; */ + int flg = stbi__zget8(a); + if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec + if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png + if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png + // window = 1 << (8 + cinfo)... 
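The header validation in stbi__parse_zlib_header is the RFC 1950 rule: the two header bytes, read big-endian, must be divisible by 31, the low nibble of CMF must be 8 (DEFLATE), and the preset-dictionary flag must be clear. A quick standalone check of the common 0x78 0x9C header:

#include <stdio.h>

int main(void)
{
    unsigned cmf = 0x78, flg = 0x9C;                 /* typical zlib stream header */
    int ok = ((cmf * 256 + flg) % 31 == 0)           /* FCHECK */
          && ((cmf & 15) == 8)                       /* CM = 8: DEFLATE */
          && ((flg & 32) == 0);                      /* FDICT not set */
    printf("header %s\n", ok ? "accepted" : "rejected");
    return 0;
}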
but who cares, we fully buffer output + return 1; +} + +// @TODO: should statically initialize these for optimal thread safety +static stbi_uc stbi__zdefault_length[288], stbi__zdefault_distance[32]; +static void stbi__init_zdefaults(void) +{ + int i; // use <= to match clearly with spec + for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8; + for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9; + for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7; + for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8; + + for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5; +} + +static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) +{ + int final, type; + if (parse_header) + if (!stbi__parse_zlib_header(a)) return 0; + a->num_bits = 0; + a->code_buffer = 0; + do { + final = stbi__zreceive(a,1); + type = stbi__zreceive(a,2); + if (type == 0) { + if (!stbi__parse_uncomperssed_block(a)) return 0; + } else if (type == 3) { + return 0; + } else { + if (type == 1) { + // use fixed code lengths + if (!stbi__zdefault_distance[31]) stbi__init_zdefaults(); + if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0; + if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0; + } else { + if (!stbi__compute_huffman_codes(a)) return 0; + } + if (!stbi__parse_huffman_block(a)) return 0; + } + } while (!final); + return 1; +} + +static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header) +{ + a->zout_start = obuf; + a->zout = obuf; + a->zout_end = obuf + olen; + a->z_expandable = exp; + + return stbi__parse_zlib(a, parse_header); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, 1)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen) +{ + return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen); +} + +STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(initial_size); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer + len; + if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 1)) + return (int) (a.zout - a.zout_start); + else + return -1; +} + +STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen) +{ + stbi__zbuf a; + char *p = (char *) stbi__malloc(16384); + if (p == NULL) return NULL; + a.zbuffer = (stbi_uc *) buffer; + a.zbuffer_end = (stbi_uc *) buffer+len; + if (stbi__do_zlib(&a, p, 16384, 1, 0)) { + if (outlen) *outlen = (int) (a.zout - a.zout_start); + return a.zout_start; + } else { + STBI_FREE(a.zout_start); + return NULL; + } +} + +STBIDEF int 
stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen) +{ + stbi__zbuf a; + a.zbuffer = (stbi_uc *) ibuffer; + a.zbuffer_end = (stbi_uc *) ibuffer + ilen; + if (stbi__do_zlib(&a, obuffer, olen, 0, 0)) + return (int) (a.zout - a.zout_start); + else + return -1; +} +#endif + +// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18 +// simple implementation +// - only 8-bit samples +// - no CRC checking +// - allocates lots of intermediate memory +// - avoids problem of streaming data between subsystems +// - avoids explicit window management +// performance +// - uses stb_zlib, a PD zlib implementation with fast huffman decoding + +#ifndef STBI_NO_PNG +typedef struct +{ + stbi__uint32 length; + stbi__uint32 type; +} stbi__pngchunk; + +static stbi__pngchunk stbi__get_chunk_header(stbi__context *s) +{ + stbi__pngchunk c; + c.length = stbi__get32be(s); + c.type = stbi__get32be(s); + return c; +} + +static int stbi__check_png_header(stbi__context *s) +{ + static stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 }; + int i; + for (i=0; i < 8; ++i) + if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG"); + return 1; +} + +typedef struct +{ + stbi__context *s; + stbi_uc *idata, *expanded, *out; +} stbi__png; + + +enum { + STBI__F_none=0, + STBI__F_sub=1, + STBI__F_up=2, + STBI__F_avg=3, + STBI__F_paeth=4, + // synthetic filters used for first scanline to avoid needing a dummy row of 0s + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static stbi_uc first_row_filter[5] = +{ + STBI__F_none, + STBI__F_sub, + STBI__F_none, + STBI__F_avg_first, + STBI__F_paeth_first +}; + +static int stbi__paeth(int a, int b, int c) +{ + int p = a + b - c; + int pa = abs(p-a); + int pb = abs(p-b); + int pc = abs(p-c); + if (pa <= pb && pa <= pc) return a; + if (pb <= pc) return b; + return c; +} + +static stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; + +// create the png data from post-deflated data +static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) +{ + stbi__context *s = a->s; + stbi__uint32 i,j,stride = x*out_n; + stbi__uint32 img_len, img_width_bytes; + int k; + int img_n = s->img_n; // copy it into a local for later + + STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1); + a->out = (stbi_uc *) stbi__malloc(x * y * out_n); // extra bytes to write off the end into + if (!a->out) return stbi__err("outofmem", "Out of memory"); + + img_width_bytes = (((img_n * x * depth) + 7) >> 3); + img_len = (img_width_bytes + 1) * y; + if (s->img_x == x && s->img_y == y) { + if (raw_len != img_len) return stbi__err("not enough pixels","Corrupt PNG"); + } else { // interlaced: + if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + } + + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *prior = cur - stride; + int filter = *raw++; + int filter_bytes = img_n; + int width = x; + if (filter > 4) + return stbi__err("invalid filter","Corrupt PNG"); + + if (depth < 8) { + STBI_ASSERT(img_width_bytes <= x); + cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place + filter_bytes = 1; + width = img_width_bytes; + } + + // if first row, use special filter that doesn't sample previous row + if (j == 0) filter = first_row_filter[filter]; + + // handle first byte explicitly + for (k=0; k < filter_bytes; ++k) { + switch (filter) { + case 
STBI__F_none : cur[k] = raw[k]; break; + case STBI__F_sub : cur[k] = raw[k]; break; + case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; + case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; + case STBI__F_avg_first : cur[k] = raw[k]; break; + case STBI__F_paeth_first: cur[k] = raw[k]; break; + } + } + + if (depth == 8) { + if (img_n != out_n) + cur[img_n] = 255; // first pixel + raw += img_n; + cur += out_n; + prior += out_n; + } else { + raw += 1; + cur += 1; + prior += 1; + } + + // this is a little gross, so that we don't switch per-pixel or per-component + if (depth < 8 || img_n == out_n) { + int nk = (width - 1)*img_n; + #define CASE(f) \ + case f: \ + for (k=0; k < nk; ++k) + switch (filter) { + // "none" filter turns into a memcpy here; make that explicit. + case STBI__F_none: memcpy(cur, raw, nk); break; + CASE(STBI__F_sub) cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); break; + CASE(STBI__F_up) cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + CASE(STBI__F_avg) cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); break; + CASE(STBI__F_paeth) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); break; + CASE(STBI__F_avg_first) cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); break; + CASE(STBI__F_paeth_first) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); break; + } + #undef CASE + raw += nk; + } else { + STBI_ASSERT(img_n+1 == out_n); + #define CASE(f) \ + case f: \ + for (i=x-1; i >= 1; --i, cur[img_n]=255,raw+=img_n,cur+=out_n,prior+=out_n) \ + for (k=0; k < img_n; ++k) + switch (filter) { + CASE(STBI__F_none) cur[k] = raw[k]; break; + CASE(STBI__F_sub) cur[k] = STBI__BYTECAST(raw[k] + cur[k-out_n]); break; + CASE(STBI__F_up) cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; + CASE(STBI__F_avg) cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-out_n])>>1)); break; + CASE(STBI__F_paeth) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-out_n],prior[k],prior[k-out_n])); break; + CASE(STBI__F_avg_first) cur[k] = STBI__BYTECAST(raw[k] + (cur[k-out_n] >> 1)); break; + CASE(STBI__F_paeth_first) cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-out_n],0,0)); break; + } + #undef CASE + } + } + + // we make a separate pass to expand bits to pixels; for performance, + // this could run two scanlines behind the above code, so it won't + // intefere with filtering but will still be in the cache. + if (depth < 8) { + for (j=0; j < y; ++j) { + stbi_uc *cur = a->out + stride*j; + stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; + // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit + // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + + // note that the final byte might overshoot and write more data than desired. + // we can allocate enough data that this never writes out of memory, but it + // could also overwrite the next scanline. can it overwrite non-empty data + // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
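Filter type 4 relies on the Paeth predictor defined above: estimate the pixel as left + up - upleft, pick whichever of the three neighbours is closest to that estimate, and add the filtered byte back modulo 256. A standalone sketch with made-up neighbour values (paeth here simply restates the predictor for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Nearest of a (left), b (up), c (up-left) to the gradient estimate p = a + b - c. */
static int paeth(int a, int b, int c)
{
    int p = a + b - c;
    int pa = abs(p - a), pb = abs(p - b), pc = abs(p - c);
    if (pa <= pb && pa <= pc) return a;
    if (pb <= pc) return b;
    return c;
}

int main(void)
{
    int left = 120, up = 125, upleft = 118, filtered = 9;
    int reconstructed = (filtered + paeth(left, up, upleft)) & 255;  /* PNG reconstruction is mod 256 */
    printf("predictor %d, pixel %d\n", paeth(left, up, upleft), reconstructed);
    return 0;
}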
+ // so we need to explicitly clamp the final ones + + if (depth == 4) { + for (k=x*img_n; k >= 2; k-=2, ++in) { + *cur++ = scale * ((*in >> 4) ); + *cur++ = scale * ((*in ) & 0x0f); + } + if (k > 0) *cur++ = scale * ((*in >> 4) ); + } else if (depth == 2) { + for (k=x*img_n; k >= 4; k-=4, ++in) { + *cur++ = scale * ((*in >> 6) ); + *cur++ = scale * ((*in >> 4) & 0x03); + *cur++ = scale * ((*in >> 2) & 0x03); + *cur++ = scale * ((*in ) & 0x03); + } + if (k > 0) *cur++ = scale * ((*in >> 6) ); + if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); + if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); + } else if (depth == 1) { + for (k=x*img_n; k >= 8; k-=8, ++in) { + *cur++ = scale * ((*in >> 7) ); + *cur++ = scale * ((*in >> 6) & 0x01); + *cur++ = scale * ((*in >> 5) & 0x01); + *cur++ = scale * ((*in >> 4) & 0x01); + *cur++ = scale * ((*in >> 3) & 0x01); + *cur++ = scale * ((*in >> 2) & 0x01); + *cur++ = scale * ((*in >> 1) & 0x01); + *cur++ = scale * ((*in ) & 0x01); + } + if (k > 0) *cur++ = scale * ((*in >> 7) ); + if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); + if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); + if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); + if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); + if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); + if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); + } + if (img_n != out_n) { + int q; + // insert alpha = 255 + cur = a->out + stride*j; + if (img_n == 1) { + for (q=x-1; q >= 0; --q) { + cur[q*2+1] = 255; + cur[q*2+0] = cur[q]; + } + } else { + STBI_ASSERT(img_n == 3); + for (q=x-1; q >= 0; --q) { + cur[q*4+3] = 255; + cur[q*4+2] = cur[q*3+2]; + cur[q*4+1] = cur[q*3+1]; + cur[q*4+0] = cur[q*3+0]; + } + } + } + } + } + + return 1; +} + +static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced) +{ + stbi_uc *final; + int p; + if (!interlaced) + return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color); + + // de-interlacing + final = (stbi_uc *) stbi__malloc(a->s->img_x * a->s->img_y * out_n); + for (p=0; p < 7; ++p) { + int xorig[] = { 0,4,0,2,0,1,0 }; + int yorig[] = { 0,0,4,0,2,0,1 }; + int xspc[] = { 8,8,4,4,2,2,1 }; + int yspc[] = { 8,8,8,4,4,2,2 }; + int i,j,x,y; + // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1 + x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p]; + y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p]; + if (x && y) { + stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y; + if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) { + STBI_FREE(final); + return 0; + } + for (j=0; j < y; ++j) { + for (i=0; i < x; ++i) { + int out_y = j*yspc[p]+yorig[p]; + int out_x = i*xspc[p]+xorig[p]; + memcpy(final + out_y*a->s->img_x*out_n + out_x*out_n, + a->out + (j*x+i)*out_n, out_n); + } + } + STBI_FREE(a->out); + image_data += img_len; + image_data_len -= img_len; + } + } + a->out = final; + + return 1; +} + +static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + // compute color-based transparency, assuming we've + // already got 255 as the alpha value in the output + STBI_ASSERT(out_n == 2 || out_n == 4); + + if (out_n == 2) { + for (i=0; i < pixel_count; ++i) { + p[1] = (p[0] == tc[0] ? 
0 : 255); + p += 2; + } + } else { + for (i=0; i < pixel_count; ++i) { + if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2]) + p[3] = 0; + p += 4; + } + } + return 1; +} + +static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n) +{ + stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y; + stbi_uc *p, *temp_out, *orig = a->out; + + p = (stbi_uc *) stbi__malloc(pixel_count * pal_img_n); + if (p == NULL) return stbi__err("outofmem", "Out of memory"); + + // between here and free(out) below, exitting would leak + temp_out = p; + + if (pal_img_n == 3) { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p += 3; + } + } else { + for (i=0; i < pixel_count; ++i) { + int n = orig[i]*4; + p[0] = palette[n ]; + p[1] = palette[n+1]; + p[2] = palette[n+2]; + p[3] = palette[n+3]; + p += 4; + } + } + STBI_FREE(a->out); + a->out = temp_out; + + STBI_NOTUSED(len); + + return 1; +} + +static int stbi__unpremultiply_on_load = 0; +static int stbi__de_iphone_flag = 0; + +STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply) +{ + stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply; +} + +STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) +{ + stbi__de_iphone_flag = flag_true_if_should_convert; +} + +static void stbi__de_iphone(stbi__png *z) +{ + stbi__context *s = z->s; + stbi__uint32 i, pixel_count = s->img_x * s->img_y; + stbi_uc *p = z->out; + + if (s->img_out_n == 3) { // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 3; + } + } else { + STBI_ASSERT(s->img_out_n == 4); + if (stbi__unpremultiply_on_load) { + // convert bgr to rgb and unpremultiply + for (i=0; i < pixel_count; ++i) { + stbi_uc a = p[3]; + stbi_uc t = p[0]; + if (a) { + p[0] = p[2] * 255 / a; + p[1] = p[1] * 255 / a; + p[2] = t * 255 / a; + } else { + p[0] = p[2]; + p[2] = t; + } + p += 4; + } + } else { + // convert bgr to rgb + for (i=0; i < pixel_count; ++i) { + stbi_uc t = p[0]; + p[0] = p[2]; + p[2] = t; + p += 4; + } + } + } +} + +#define STBI__PNG_TYPE(a,b,c,d) (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) + +static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) +{ + stbi_uc palette[1024], pal_img_n=0; + stbi_uc has_trans=0, tc[3]; + stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0; + int first=1,k,interlace=0, color=0, depth=0, is_iphone=0; + stbi__context *s = z->s; + + z->expanded = NULL; + z->idata = NULL; + z->out = NULL; + + if (!stbi__check_png_header(s)) return 0; + + if (scan == STBI__SCAN_type) return 1; + + for (;;) { + stbi__pngchunk c = stbi__get_chunk_header(s); + switch (c.type) { + case STBI__PNG_TYPE('C','g','B','I'): + is_iphone = 1; + stbi__skip(s, c.length); + break; + case STBI__PNG_TYPE('I','H','D','R'): { + int comp,filter; + if (!first) return stbi__err("multiple IHDR","Corrupt PNG"); + first = 0; + if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG"); + s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)"); + depth = stbi__get8(s); if (depth != 1 && depth != 2 && depth != 4 && depth != 8) return stbi__err("1/2/4/8-bit only","PNG not supported: 1/2/4/8-bit only"); + color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG"); + if (color == 3) 
pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG"); + comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG"); + filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG"); + interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG"); + if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG"); + if (!pal_img_n) { + s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); + if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); + if (scan == STBI__SCAN_header) return 1; + } else { + // if paletted, then pal_n is our final components, and + // img_n is # components to decompress/filter. + s->img_n = 1; + if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); + // if SCAN_header, have to scan to see if we have a tRNS + } + break; + } + + case STBI__PNG_TYPE('P','L','T','E'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG"); + pal_len = c.length / 3; + if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG"); + for (i=0; i < pal_len; ++i) { + palette[i*4+0] = stbi__get8(s); + palette[i*4+1] = stbi__get8(s); + palette[i*4+2] = stbi__get8(s); + palette[i*4+3] = 255; + } + break; + } + + case STBI__PNG_TYPE('t','R','N','S'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG"); + if (pal_img_n) { + if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; } + if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG"); + if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG"); + pal_img_n = 4; + for (i=0; i < c.length; ++i) + palette[i*4+3] = stbi__get8(s); + } else { + if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); + if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); + has_trans = 1; + for (k=0; k < s->img_n; ++k) + tc[k] = (stbi_uc) (stbi__get16be(s) & 255) * stbi__depth_scale_table[depth]; // non 8-bit images will be larger + } + break; + } + + case STBI__PNG_TYPE('I','D','A','T'): { + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); + if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if ((int)(ioff + c.length) < (int)ioff) return 0; + if (ioff + c.length > idata_limit) { + stbi__uint32 idata_limit_old = idata_limit; + stbi_uc *p; + if (idata_limit == 0) idata_limit = c.length > 4096 ? 
c.length : 4096; + while (ioff + c.length > idata_limit) + idata_limit *= 2; + STBI_NOTUSED(idata_limit_old); + p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory"); + z->idata = p; + } + if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG"); + ioff += c.length; + break; + } + + case STBI__PNG_TYPE('I','E','N','D'): { + stbi__uint32 raw_len, bpl; + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if (scan != STBI__SCAN_load) return 1; + if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG"); + // initial guess for decoded data size to avoid unnecessary reallocs + bpl = (s->img_x * depth + 7) / 8; // bytes per line, per component + raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */; + z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone); + if (z->expanded == NULL) return 0; // zlib should set error + STBI_FREE(z->idata); z->idata = NULL; + if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans) + s->img_out_n = s->img_n+1; + else + s->img_out_n = s->img_n; + if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, depth, color, interlace)) return 0; + if (has_trans) + if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0; + if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2) + stbi__de_iphone(z); + if (pal_img_n) { + // pal_img_n == 3 or 4 + s->img_n = pal_img_n; // record the actual colors we had + s->img_out_n = pal_img_n; + if (req_comp >= 3) s->img_out_n = req_comp; + if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n)) + return 0; + } + STBI_FREE(z->expanded); z->expanded = NULL; + return 1; + } + + default: + // if critical, fail + if (first) return stbi__err("first not IHDR", "Corrupt PNG"); + if ((c.type & (1 << 29)) == 0) { + #ifndef STBI_NO_FAILURE_STRINGS + // not threadsafe + static char invalid_chunk[] = "XXXX PNG chunk not known"; + invalid_chunk[0] = STBI__BYTECAST(c.type >> 24); + invalid_chunk[1] = STBI__BYTECAST(c.type >> 16); + invalid_chunk[2] = STBI__BYTECAST(c.type >> 8); + invalid_chunk[3] = STBI__BYTECAST(c.type >> 0); + #endif + return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type"); + } + stbi__skip(s, c.length); + break; + } + // end of PNG chunk, read and skip CRC + stbi__get32be(s); + } +} + +static unsigned char *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp) +{ + unsigned char *result=NULL; + if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error"); + if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) { + result = p->out; + p->out = NULL; + if (req_comp && req_comp != p->s->img_out_n) { + result = stbi__convert_format(result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y); + p->s->img_out_n = req_comp; + if (result == NULL) return result; + } + *x = p->s->img_x; + *y = p->s->img_y; + if (n) *n = p->s->img_out_n; + } + STBI_FREE(p->out); p->out = NULL; + STBI_FREE(p->expanded); p->expanded = NULL; + STBI_FREE(p->idata); p->idata = NULL; + + return result; +} + +static unsigned char *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi__png p; + p.s = s; + return stbi__do_png(&p, x,y,comp,req_comp); +} + +static int stbi__png_test(stbi__context *s) +{ + int r; + r = stbi__check_png_header(s); + stbi__rewind(s); + return r; +} + +static int 
stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp) +{ + if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) { + stbi__rewind( p->s ); + return 0; + } + if (x) *x = p->s->img_x; + if (y) *y = p->s->img_y; + if (comp) *comp = p->s->img_n; + return 1; +} + +static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__png p; + p.s = s; + return stbi__png_info_raw(&p, x, y, comp); +} +#endif + +// Microsoft/Windows BMP image + +#ifndef STBI_NO_BMP +static int stbi__bmp_test_raw(stbi__context *s) +{ + int r; + int sz; + if (stbi__get8(s) != 'B') return 0; + if (stbi__get8(s) != 'M') return 0; + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + stbi__get32le(s); // discard data offset + sz = stbi__get32le(s); + r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124); + return r; +} + +static int stbi__bmp_test(stbi__context *s) +{ + int r = stbi__bmp_test_raw(s); + stbi__rewind(s); + return r; +} + + +// returns 0..31 for the highest set bit +static int stbi__high_bit(unsigned int z) +{ + int n=0; + if (z == 0) return -1; + if (z >= 0x10000) n += 16, z >>= 16; + if (z >= 0x00100) n += 8, z >>= 8; + if (z >= 0x00010) n += 4, z >>= 4; + if (z >= 0x00004) n += 2, z >>= 2; + if (z >= 0x00002) n += 1, z >>= 1; + return n; +} + +static int stbi__bitcount(unsigned int a) +{ + a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2 + a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4 + a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits + a = (a + (a >> 8)); // max 16 per 8 bits + a = (a + (a >> 16)); // max 32 per 8 bits + return a & 0xff; +} + +static int stbi__shiftsigned(int v, int shift, int bits) +{ + int result; + int z=0; + + if (shift < 0) v <<= -shift; + else v >>= shift; + result = v; + + z = bits; + while (z < 8) { + result += v >> z; + z += bits; + } + return result; +} + +typedef struct +{ + int bpp, offset, hsz; + unsigned int mr,mg,mb,ma, all_a; +} stbi__bmp_data; + +static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info) +{ + int hsz; + if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP"); + stbi__get32le(s); // discard filesize + stbi__get16le(s); // discard reserved + stbi__get16le(s); // discard reserved + info->offset = stbi__get32le(s); + info->hsz = hsz = stbi__get32le(s); + + if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown"); + if (hsz == 12) { + s->img_x = stbi__get16le(s); + s->img_y = stbi__get16le(s); + } else { + s->img_x = stbi__get32le(s); + s->img_y = stbi__get32le(s); + } + if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP"); + info->bpp = stbi__get16le(s); + if (info->bpp == 1) return stbi__errpuc("monochrome", "BMP type not supported: 1-bit"); + if (hsz != 12) { + int compress = stbi__get32le(s); + if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE"); + stbi__get32le(s); // discard sizeof + stbi__get32le(s); // discard hres + stbi__get32le(s); // discard vres + stbi__get32le(s); // discard colorsused + stbi__get32le(s); // discard max important + if (hsz == 40 || hsz == 56) { + if (hsz == 56) { + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + stbi__get32le(s); + } + if (info->bpp == 16 || info->bpp == 32) { + info->mr = info->mg = info->mb = 0; + if (compress == 0) { + if (info->bpp == 32) { + info->mr = 0xffu << 16; + 
info->mg = 0xffu << 8; + info->mb = 0xffu << 0; + info->ma = 0xffu << 24; + info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0 + } else { + info->mr = 31u << 10; + info->mg = 31u << 5; + info->mb = 31u << 0; + } + } else if (compress == 3) { + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + // not documented, but generated by photoshop and handled by mspaint + if (info->mr == info->mg && info->mg == info->mb) { + // ?!?!? + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else + return stbi__errpuc("bad BMP", "bad BMP"); + } + } else { + int i; + if (hsz != 108 && hsz != 124) + return stbi__errpuc("bad BMP", "bad BMP"); + info->mr = stbi__get32le(s); + info->mg = stbi__get32le(s); + info->mb = stbi__get32le(s); + info->ma = stbi__get32le(s); + stbi__get32le(s); // discard color space + for (i=0; i < 12; ++i) + stbi__get32le(s); // discard color space parameters + if (hsz == 124) { + stbi__get32le(s); // discard rendering intent + stbi__get32le(s); // discard offset of profile data + stbi__get32le(s); // discard size of profile data + stbi__get32le(s); // discard reserved + } + } + } + return (void *) 1; +} + + +static stbi_uc *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi_uc *out; + unsigned int mr=0,mg=0,mb=0,ma=0, all_a; + stbi_uc pal[256][4]; + int psize=0,i,j,width; + int flip_vertically, pad, target; + stbi__bmp_data info; + + info.all_a = 255; + if (stbi__bmp_parse_header(s, &info) == NULL) + return NULL; // error code already set + + flip_vertically = ((int) s->img_y) > 0; + s->img_y = abs((int) s->img_y); + + mr = info.mr; + mg = info.mg; + mb = info.mb; + ma = info.ma; + all_a = info.all_a; + + if (info.hsz == 12) { + if (info.bpp < 24) + psize = (info.offset - 14 - 24) / 3; + } else { + if (info.bpp < 16) + psize = (info.offset - 14 - info.hsz) >> 2; + } + + s->img_n = ma ? 4 : 3; + if (req_comp && req_comp >= 3) // we can directly decode 3 or 4 + target = req_comp; + else + target = s->img_n; // if they want monochrome, we'll post-convert + + out = (stbi_uc *) stbi__malloc(target * s->img_x * s->img_y); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + if (info.bpp < 16) { + int z=0; + if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); } + for (i=0; i < psize; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + if (info.hsz != 12) stbi__get8(s); + pal[i][3] = 255; + } + stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4)); + if (info.bpp == 4) width = (s->img_x + 1) >> 1; + else if (info.bpp == 8) width = s->img_x; + else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); } + pad = (-width)&3; + for (j=0; j < (int) s->img_y; ++j) { + for (i=0; i < (int) s->img_x; i += 2) { + int v=stbi__get8(s),v2=0; + if (info.bpp == 4) { + v2 = v & 15; + v >>= 4; + } + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + if (i+1 == (int) s->img_x) break; + v = (info.bpp == 8) ? 
stbi__get8(s) : v2; + out[z++] = pal[v][0]; + out[z++] = pal[v][1]; + out[z++] = pal[v][2]; + if (target == 4) out[z++] = 255; + } + stbi__skip(s, pad); + } + } else { + int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0; + int z = 0; + int easy=0; + stbi__skip(s, info.offset - 14 - info.hsz); + if (info.bpp == 24) width = 3 * s->img_x; + else if (info.bpp == 16) width = 2*s->img_x; + else /* bpp = 32 and pad = 0 */ width=0; + pad = (-width) & 3; + if (info.bpp == 24) { + easy = 1; + } else if (info.bpp == 32) { + if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000) + easy = 2; + } + if (!easy) { + if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); } + // right shift amt to put high bit in position #7 + rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr); + gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg); + bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb); + ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma); + } + for (j=0; j < (int) s->img_y; ++j) { + if (easy) { + for (i=0; i < (int) s->img_x; ++i) { + unsigned char a; + out[z+2] = stbi__get8(s); + out[z+1] = stbi__get8(s); + out[z+0] = stbi__get8(s); + z += 3; + a = (easy == 2 ? stbi__get8(s) : 255); + all_a |= a; + if (target == 4) out[z++] = a; + } + } else { + int bpp = info.bpp; + for (i=0; i < (int) s->img_x; ++i) { + stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s)); + int a; + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount)); + out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount)); + a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255); + all_a |= a; + if (target == 4) out[z++] = STBI__BYTECAST(a); + } + } + stbi__skip(s, pad); + } + } + + // if alpha channel is all 0s, replace with all 255s + if (target == 4 && all_a == 0) + for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4) + out[i] = 255; + + if (flip_vertically) { + stbi_uc t; + for (j=0; j < (int) s->img_y>>1; ++j) { + stbi_uc *p1 = out + j *s->img_x*target; + stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target; + for (i=0; i < (int) s->img_x*target; ++i) { + t = p1[i], p1[i] = p2[i], p2[i] = t; + } + } + } + + if (req_comp && req_comp != target) { + out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + *x = s->img_x; + *y = s->img_y; + if (comp) *comp = s->img_n; + return out; +} +#endif + +// Targa Truevision - TGA +// by Jonathan Dummer +#ifndef STBI_NO_TGA +// returns STBI_rgb or whatever, 0 on error +static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16) +{ + // only RGB or RGBA (incl. 
16bit) or grey allowed + if(is_rgb16) *is_rgb16 = 0; + switch(bits_per_pixel) { + case 8: return STBI_grey; + case 16: if(is_grey) return STBI_grey_alpha; + // else: fall-through + case 15: if(is_rgb16) *is_rgb16 = 1; + return STBI_rgb; + case 24: // fall-through + case 32: return bits_per_pixel/8; + default: return 0; + } +} + +static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp) +{ + int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp; + int sz, tga_colormap_type; + stbi__get8(s); // discard Offset + tga_colormap_type = stbi__get8(s); // colormap type + if( tga_colormap_type > 1 ) { + stbi__rewind(s); + return 0; // only RGB or indexed allowed + } + tga_image_type = stbi__get8(s); // image type + if ( tga_colormap_type == 1 ) { // colormapped (paletted) image + if (tga_image_type != 1 && tga_image_type != 9) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) { + stbi__rewind(s); + return 0; + } + stbi__skip(s,4); // skip image x and y origin + tga_colormap_bpp = sz; + } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE + if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) { + stbi__rewind(s); + return 0; // only RGB or grey allowed, +/- RLE + } + stbi__skip(s,9); // skip colormap specification and image x/y origin + tga_colormap_bpp = 0; + } + tga_w = stbi__get16le(s); + if( tga_w < 1 ) { + stbi__rewind(s); + return 0; // test width + } + tga_h = stbi__get16le(s); + if( tga_h < 1 ) { + stbi__rewind(s); + return 0; // test height + } + tga_bits_per_pixel = stbi__get8(s); // bits per pixel + stbi__get8(s); // ignore alpha bits + if (tga_colormap_bpp != 0) { + if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) { + // when using a colormap, tga_bits_per_pixel is the size of the indexes + // I don't think anything but 8 or 16bit indexes makes sense + stbi__rewind(s); + return 0; + } + tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL); + } else { + tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL); + } + if(!tga_comp) { + stbi__rewind(s); + return 0; + } + if (x) *x = tga_w; + if (y) *y = tga_h; + if (comp) *comp = tga_comp; + return 1; // seems to have passed everything +} + +static int stbi__tga_test(stbi__context *s) +{ + int res = 0; + int sz, tga_color_type; + stbi__get8(s); // discard Offset + tga_color_type = stbi__get8(s); // color type + if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed + sz = stbi__get8(s); // image type + if ( tga_color_type == 1 ) { // colormapped (paletted) image + if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9 + stbi__skip(s,4); // skip index of first colormap entry and number of entries + sz = stbi__get8(s); // check bits per palette color entry + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + stbi__skip(s,4); // skip image x and y origin + } else { // "normal" image w/o colormap + if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE + stbi__skip(s,9); // skip colormap specification and image x/y origin + } + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width + if ( stbi__get16le(s) < 1 ) goto errorEnd; // test 
height + sz = stbi__get8(s); // bits per pixel + if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index + if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd; + + res = 1; // if we got this far, everything's good and we can return 1 instead of 0 + +errorEnd: + stbi__rewind(s); + return res; +} + +// read 16bit value and convert to 24bit RGB +void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out) +{ + stbi__uint16 px = stbi__get16le(s); + stbi__uint16 fiveBitMask = 31; + // we have 3 channels with 5bits each + int r = (px >> 10) & fiveBitMask; + int g = (px >> 5) & fiveBitMask; + int b = px & fiveBitMask; + // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later + out[0] = (r * 255)/31; + out[1] = (g * 255)/31; + out[2] = (b * 255)/31; + + // some people claim that the most significant bit might be used for alpha + // (possibly if an alpha-bit is set in the "image descriptor byte") + // but that only made 16bit test images completely translucent.. + // so let's treat all 15 and 16bit TGAs as RGB with no alpha. +} + +static stbi_uc *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + // read in the TGA header stuff + int tga_offset = stbi__get8(s); + int tga_indexed = stbi__get8(s); + int tga_image_type = stbi__get8(s); + int tga_is_RLE = 0; + int tga_palette_start = stbi__get16le(s); + int tga_palette_len = stbi__get16le(s); + int tga_palette_bits = stbi__get8(s); + int tga_x_origin = stbi__get16le(s); + int tga_y_origin = stbi__get16le(s); + int tga_width = stbi__get16le(s); + int tga_height = stbi__get16le(s); + int tga_bits_per_pixel = stbi__get8(s); + int tga_comp, tga_rgb16=0; + int tga_inverted = stbi__get8(s); + // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?) + // image data + unsigned char *tga_data; + unsigned char *tga_palette = NULL; + int i, j; + unsigned char raw_data[4]; + int RLE_count = 0; + int RLE_repeating = 0; + int read_next_pixel = 1; + + // do a tiny bit of precessing + if ( tga_image_type >= 8 ) + { + tga_image_type -= 8; + tga_is_RLE = 1; + } + tga_inverted = 1 - ((tga_inverted >> 5) & 1); + + // If I'm paletted, then I'll use the number of bits from the palette + if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16); + else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16); + + if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency + return stbi__errpuc("bad format", "Can't find out TGA pixelformat"); + + // tga info + *x = tga_width; + *y = tga_height; + if (comp) *comp = tga_comp; + + tga_data = (unsigned char*)stbi__malloc( (size_t)tga_width * tga_height * tga_comp ); + if (!tga_data) return stbi__errpuc("outofmem", "Out of memory"); + + // skip to the data's starting position (offset usually = 0) + stbi__skip(s, tga_offset ); + + if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) { + for (i=0; i < tga_height; ++i) { + int row = tga_inverted ? tga_height -i - 1 : i; + stbi_uc *tga_row = tga_data + row*tga_width*tga_comp; + stbi__getn(s, tga_row, tga_width * tga_comp); + } + } else { + // do I need to load a palette? + if ( tga_indexed) + { + // any data to skip? 
(offset usually = 0) + stbi__skip(s, tga_palette_start ); + // load the palette + tga_palette = (unsigned char*)stbi__malloc( tga_palette_len * tga_comp ); + if (!tga_palette) { + STBI_FREE(tga_data); + return stbi__errpuc("outofmem", "Out of memory"); + } + if (tga_rgb16) { + stbi_uc *pal_entry = tga_palette; + STBI_ASSERT(tga_comp == STBI_rgb); + for (i=0; i < tga_palette_len; ++i) { + stbi__tga_read_rgb16(s, pal_entry); + pal_entry += tga_comp; + } + } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) { + STBI_FREE(tga_data); + STBI_FREE(tga_palette); + return stbi__errpuc("bad palette", "Corrupt TGA"); + } + } + // load the data + for (i=0; i < tga_width * tga_height; ++i) + { + // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk? + if ( tga_is_RLE ) + { + if ( RLE_count == 0 ) + { + // yep, get the next byte as a RLE command + int RLE_cmd = stbi__get8(s); + RLE_count = 1 + (RLE_cmd & 127); + RLE_repeating = RLE_cmd >> 7; + read_next_pixel = 1; + } else if ( !RLE_repeating ) + { + read_next_pixel = 1; + } + } else + { + read_next_pixel = 1; + } + // OK, if I need to read a pixel, do it now + if ( read_next_pixel ) + { + // load however much data we did have + if ( tga_indexed ) + { + // read in index, then perform the lookup + int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s); + if ( pal_idx >= tga_palette_len ) { + // invalid index + pal_idx = 0; + } + pal_idx *= tga_comp; + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = tga_palette[pal_idx+j]; + } + } else if(tga_rgb16) { + STBI_ASSERT(tga_comp == STBI_rgb); + stbi__tga_read_rgb16(s, raw_data); + } else { + // read in the data raw + for (j = 0; j < tga_comp; ++j) { + raw_data[j] = stbi__get8(s); + } + } + // clear the reading flag for the next pixel + read_next_pixel = 0; + } // end of reading a pixel + + // copy data + for (j = 0; j < tga_comp; ++j) + tga_data[i*tga_comp+j] = raw_data[j]; + + // in case we're in RLE mode, keep counting down + --RLE_count; + } + // do I need to invert the image? + if ( tga_inverted ) + { + for (j = 0; j*2 < tga_height; ++j) + { + int index1 = j * tga_width * tga_comp; + int index2 = (tga_height - 1 - j) * tga_width * tga_comp; + for (i = tga_width * tga_comp; i > 0; --i) + { + unsigned char temp = tga_data[index1]; + tga_data[index1] = tga_data[index2]; + tga_data[index2] = temp; + ++index1; + ++index2; + } + } + } + // clear my palette, if I had one + if ( tga_palette != NULL ) + { + STBI_FREE( tga_palette ); + } + } + + // swap RGB - if the source data was RGB16, it already is in the right order + if (tga_comp >= 3 && !tga_rgb16) + { + unsigned char* tga_pixel = tga_data; + for (i=0; i < tga_width * tga_height; ++i) + { + unsigned char temp = tga_pixel[0]; + tga_pixel[0] = tga_pixel[2]; + tga_pixel[2] = temp; + tga_pixel += tga_comp; + } + } + + // convert to target component count + if (req_comp && req_comp != tga_comp) + tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height); + + // the things I do to get rid of an error message, and yet keep + // Microsoft's C compilers happy... 
[8^( + tga_palette_start = tga_palette_len = tga_palette_bits = + tga_x_origin = tga_y_origin = 0; + // OK, done + return tga_data; +} +#endif + +// ************************************************************************************************* +// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB + +#ifndef STBI_NO_PSD +static int stbi__psd_test(stbi__context *s) +{ + int r = (stbi__get32be(s) == 0x38425053); + stbi__rewind(s); + return r; +} + +static stbi_uc *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + int pixelCount; + int channelCount, compression; + int channel, i, count, len; + int bitdepth; + int w,h; + stbi_uc *out; + + // Check identifier + if (stbi__get32be(s) != 0x38425053) // "8BPS" + return stbi__errpuc("not PSD", "Corrupt PSD image"); + + // Check file type version. + if (stbi__get16be(s) != 1) + return stbi__errpuc("wrong version", "Unsupported version of PSD image"); + + // Skip 6 reserved bytes. + stbi__skip(s, 6 ); + + // Read the number of channels (R, G, B, A, etc). + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) + return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image"); + + // Read the rows and columns of the image. + h = stbi__get32be(s); + w = stbi__get32be(s); + + // Make sure the depth is 8 bits. + bitdepth = stbi__get16be(s); + if (bitdepth != 8 && bitdepth != 16) + return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit"); + + // Make sure the color mode is RGB. + // Valid options are: + // 0: Bitmap + // 1: Grayscale + // 2: Indexed color + // 3: RGB color + // 4: CMYK color + // 7: Multichannel + // 8: Duotone + // 9: Lab color + if (stbi__get16be(s) != 3) + return stbi__errpuc("wrong color format", "PSD is not in RGB color format"); + + // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.) + stbi__skip(s,stbi__get32be(s) ); + + // Skip the image resources. (resolution, pen tool paths, etc) + stbi__skip(s, stbi__get32be(s) ); + + // Skip the reserved data. + stbi__skip(s, stbi__get32be(s) ); + + // Find out if the data is compressed. + // Known values: + // 0: no compression + // 1: RLE compressed + compression = stbi__get16be(s); + if (compression > 1) + return stbi__errpuc("bad compression", "PSD has an unknown compression format"); + + // Create the destination image. + out = (stbi_uc *) stbi__malloc(4 * w*h); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + pixelCount = w*h; + + // Initialize the data to zero. + //memset( out, 0, pixelCount * 4 ); + + // Finally, the image data. + if (compression) { + // RLE as used by .PSD and .TIFF + // Loop until you get the number of unpacked bytes you are expecting: + // Read the next source byte into n. + // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally. + // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times. + // Else if n is 128, noop. + // Endloop + + // The RLE-compressed data is preceeded by a 2-byte data count for each row in the data, + // which we're going to just skip. + stbi__skip(s, h * channelCount * 2 ); + + // Read the RLE data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out+channel; + if (channel >= channelCount) { + // Fill this channel with default data. + for (i = 0; i < pixelCount; i++, p += 4) + *p = (channel == 3 ? 255 : 0); + } else { + // Read the RLE data. 
+ count = 0; + while (count < pixelCount) { + len = stbi__get8(s); + if (len == 128) { + // No-op. + } else if (len < 128) { + // Copy next len+1 bytes literally. + len++; + count += len; + while (len) { + *p = stbi__get8(s); + p += 4; + len--; + } + } else if (len > 128) { + stbi_uc val; + // Next -len+1 bytes in the dest are replicated from next source byte. + // (Interpret len as a negative 8-bit int.) + len ^= 0x0FF; + len += 2; + val = stbi__get8(s); + count += len; + while (len) { + *p = val; + p += 4; + len--; + } + } + } + } + } + + } else { + // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...) + // where each channel consists of an 8-bit value for each pixel in the image. + + // Read the data by channel. + for (channel = 0; channel < 4; channel++) { + stbi_uc *p; + + p = out + channel; + if (channel >= channelCount) { + // Fill this channel with default data. + stbi_uc val = channel == 3 ? 255 : 0; + for (i = 0; i < pixelCount; i++, p += 4) + *p = val; + } else { + // Read the data. + if (bitdepth == 16) { + for (i = 0; i < pixelCount; i++, p += 4) + *p = (stbi_uc) (stbi__get16be(s) >> 8); + } else { + for (i = 0; i < pixelCount; i++, p += 4) + *p = stbi__get8(s); + } + } + } + } + + if (req_comp && req_comp != 4) { + out = stbi__convert_format(out, 4, req_comp, w, h); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + + if (comp) *comp = 4; + *y = h; + *x = w; + + return out; +} +#endif + +// ************************************************************************************************* +// Softimage PIC loader +// by Tom Seddon +// +// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format +// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/ + +#ifndef STBI_NO_PIC +static int stbi__pic_is4(stbi__context *s,const char *str) +{ + int i; + for (i=0; i<4; ++i) + if (stbi__get8(s) != (stbi_uc)str[i]) + return 0; + + return 1; +} + +static int stbi__pic_test_core(stbi__context *s) +{ + int i; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) + return 0; + + for(i=0;i<84;++i) + stbi__get8(s); + + if (!stbi__pic_is4(s,"PICT")) + return 0; + + return 1; +} + +typedef struct +{ + stbi_uc size,type,channel; +} stbi__pic_packet; + +static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest) +{ + int mask=0x80, i; + + for (i=0; i<4; ++i, mask>>=1) { + if (channel & mask) { + if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short"); + dest[i]=stbi__get8(s); + } + } + + return dest; +} + +static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src) +{ + int mask=0x80,i; + + for (i=0;i<4; ++i, mask>>=1) + if (channel&mask) + dest[i]=src[i]; +} + +static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result) +{ + int act_comp=0,num_packets=0,y,chained; + stbi__pic_packet packets[10]; + + // this will (should...) cater for even some bizarre stuff like having data + // for the same channel in multiple packets. 
+ do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return stbi__errpuc("bad format","too many packets"); + + packet = &packets[num_packets++]; + + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + + act_comp |= packet->channel; + + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)"); + if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp"); + } while (chained); + + *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel? + + for(y=0; y<height; ++y) { + int packet_idx; + + for(packet_idx=0; packet_idx < num_packets; ++packet_idx) { + stbi__pic_packet *packet = &packets[packet_idx]; + stbi_uc *dest = result+y*width*4; + + switch (packet->type) { + default: + return stbi__errpuc("bad format","packet has bad compression type"); + + case 0: {//uncompressed + int x; + + for(x=0;x<width;++x, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + break; + } + + case 1://Pure RLE + { + int left=width, i; + + while (left>0) { + stbi_uc count,value[4]; + + count=stbi__get8(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)"); + + if (count > left) + count = (stbi_uc) left; + + if (!stbi__readval(s,packet->channel,value)) return 0; + + for(i=0; i<count; ++i,dest+=4) + stbi__copyval(packet->channel,dest,value); + left -= count; + } + } + break; + + case 2: {//Mixed RLE + int left=width; + while (left>0) { + int count = stbi__get8(s), i; + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)"); + + if (count >= 128) { // Repeated + stbi_uc value[4]; + + if (count==128) + count = stbi__get16be(s); + else + count -= 127; + if (count > left) + return stbi__errpuc("bad file","scanline overrun"); + + if (!stbi__readval(s,packet->channel,value)) + return 0; + + for(i=0;i<count;++i, dest+=4) + stbi__copyval(packet->channel,dest,value); + } else { // Raw + ++count; + if (count>left) return stbi__errpuc("bad file","scanline overrun"); + + for(i=0;i<count;++i, dest+=4) + if (!stbi__readval(s,packet->channel,dest)) + return 0; + } + left-=count; + } + break; + } + } + } + } + + return result; +} + +static stbi_uc *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp) +{ + stbi_uc *result; + int i, x,y; + + for (i=0; i<92; ++i) + stbi__get8(s); + + x = stbi__get16be(s); + y = stbi__get16be(s); + if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)"); + if ((1 << 28) / x < y) return stbi__errpuc("too large", "Image too large to decode"); + + stbi__get32be(s); //skip `ratio' + stbi__get16be(s); //skip `fields' + stbi__get16be(s); //skip `pad' + + // intermediate buffer is RGBA + result = (stbi_uc *) stbi__malloc(x*y*4); + memset(result, 0xff, x*y*4); + + if (!stbi__pic_load_core(s,x,y,comp, result)) { + STBI_FREE(result); + result=0; + } + *px = x; + *py = y; + if (req_comp == 0) req_comp = *comp; + result=stbi__convert_format(result,4,req_comp,x,y); + + return result; +} + +static int stbi__pic_test(stbi__context *s) +{ + int r = stbi__pic_test_core(s); + stbi__rewind(s); + return r; +} +#endif + +// ************************************************************************************************* +// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb + +#ifndef STBI_NO_GIF +typedef struct +{ + stbi__int16 prefix; + stbi_uc first; + stbi_uc suffix; +} stbi__gif_lzw; + +typedef struct +{ + int w,h; + stbi_uc *out, *old_out; // output buffer (always 4 components) + int flags, bgindex, ratio, transparent, eflags, delay; + stbi_uc pal[256][4]; + stbi_uc lpal[256][4]; + stbi__gif_lzw codes[4096]; + stbi_uc *color_table; + int parse, step; + int lflags; + int start_x, start_y; + int max_x, max_y; + int cur_x, cur_y; + int line_size; +} stbi__gif; + +static int 
stbi__gif_test_raw(stbi__context *s) +{ + int sz; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0; + sz = stbi__get8(s); + if (sz != '9' && sz != '7') return 0; + if (stbi__get8(s) != 'a') return 0; + return 1; +} + +static int stbi__gif_test(stbi__context *s) +{ + int r = stbi__gif_test_raw(s); + stbi__rewind(s); + return r; +} + +static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp) +{ + int i; + for (i=0; i < num_entries; ++i) { + pal[i][2] = stbi__get8(s); + pal[i][1] = stbi__get8(s); + pal[i][0] = stbi__get8(s); + pal[i][3] = transp == i ? 0 : 255; + } +} + +static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info) +{ + stbi_uc version; + if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') + return stbi__err("not GIF", "Corrupt GIF"); + + version = stbi__get8(s); + if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF"); + if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF"); + + stbi__g_failure_reason = ""; + g->w = stbi__get16le(s); + g->h = stbi__get16le(s); + g->flags = stbi__get8(s); + g->bgindex = stbi__get8(s); + g->ratio = stbi__get8(s); + g->transparent = -1; + + if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments + + if (is_info) return 1; + + if (g->flags & 0x80) + stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1); + + return 1; +} + +static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp) +{ + stbi__gif g; + if (!stbi__gif_header(s, &g, comp, 1)) { + stbi__rewind( s ); + return 0; + } + if (x) *x = g.w; + if (y) *y = g.h; + return 1; +} + +static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code) +{ + stbi_uc *p, *c; + + // recurse to decode the prefixes, since the linked-list is backwards, + // and working backwards through an interleaved image would be nasty + if (g->codes[code].prefix >= 0) + stbi__out_gif_code(g, g->codes[code].prefix); + + if (g->cur_y >= g->max_y) return; + + p = &g->out[g->cur_x + g->cur_y]; + c = &g->color_table[g->codes[code].suffix * 4]; + + if (c[3] >= 128) { + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = c[3]; + } + g->cur_x += 4; + + if (g->cur_x >= g->max_x) { + g->cur_x = g->start_x; + g->cur_y += g->step; + + while (g->cur_y >= g->max_y && g->parse > 0) { + g->step = (1 << g->parse) * g->line_size; + g->cur_y = g->start_y + (g->step >> 1); + --g->parse; + } + } +} + +static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g) +{ + stbi_uc lzw_cs; + stbi__int32 len, init_code; + stbi__uint32 first; + stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear; + stbi__gif_lzw *p; + + lzw_cs = stbi__get8(s); + if (lzw_cs > 12) return NULL; + clear = 1 << lzw_cs; + first = 1; + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + bits = 0; + valid_bits = 0; + for (init_code = 0; init_code < clear; init_code++) { + g->codes[init_code].prefix = -1; + g->codes[init_code].first = (stbi_uc) init_code; + g->codes[init_code].suffix = (stbi_uc) init_code; + } + + // support no starting clear code + avail = clear+2; + oldcode = -1; + + len = 0; + for(;;) { + if (valid_bits < codesize) { + if (len == 0) { + len = stbi__get8(s); // start new block + if (len == 0) + return g->out; + } + --len; + bits |= (stbi__int32) stbi__get8(s) << valid_bits; + valid_bits += 8; + } else { + stbi__int32 code = bits & codemask; + 
bits >>= codesize; + valid_bits -= codesize; + // @OPTIMIZE: is there some way we can accelerate the non-clear path? + if (code == clear) { // clear code + codesize = lzw_cs + 1; + codemask = (1 << codesize) - 1; + avail = clear + 2; + oldcode = -1; + first = 0; + } else if (code == clear + 1) { // end of stream code + stbi__skip(s, len); + while ((len = stbi__get8(s)) > 0) + stbi__skip(s,len); + return g->out; + } else if (code <= avail) { + if (first) return stbi__errpuc("no clear code", "Corrupt GIF"); + + if (oldcode >= 0) { + p = &g->codes[avail++]; + if (avail > 4096) return stbi__errpuc("too many codes", "Corrupt GIF"); + p->prefix = (stbi__int16) oldcode; + p->first = g->codes[oldcode].first; + p->suffix = (code == avail) ? p->first : g->codes[code].first; + } else if (code == avail) + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + + stbi__out_gif_code(g, (stbi__uint16) code); + + if ((avail & codemask) == 0 && avail <= 0x0FFF) { + codesize++; + codemask = (1 << codesize) - 1; + } + + oldcode = code; + } else { + return stbi__errpuc("illegal code in raster", "Corrupt GIF"); + } + } + } +} + +static void stbi__fill_gif_background(stbi__gif *g, int x0, int y0, int x1, int y1) +{ + int x, y; + stbi_uc *c = g->pal[g->bgindex]; + for (y = y0; y < y1; y += 4 * g->w) { + for (x = x0; x < x1; x += 4) { + stbi_uc *p = &g->out[y + x]; + p[0] = c[2]; + p[1] = c[1]; + p[2] = c[0]; + p[3] = 0; + } + } +} + +// this function is designed to support animated gifs, although stb_image doesn't support it +static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp) +{ + int i; + stbi_uc *prev_out = 0; + + if (g->out == 0 && !stbi__gif_header(s, g, comp,0)) + return 0; // stbi__g_failure_reason set by stbi__gif_header + + prev_out = g->out; + g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h); + if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory"); + + switch ((g->eflags & 0x1C) >> 2) { + case 0: // unspecified (also always used on 1st frame) + stbi__fill_gif_background(g, 0, 0, 4 * g->w, 4 * g->w * g->h); + break; + case 1: // do not dispose + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + g->old_out = prev_out; + break; + case 2: // dispose to background + if (prev_out) memcpy(g->out, prev_out, 4 * g->w * g->h); + stbi__fill_gif_background(g, g->start_x, g->start_y, g->max_x, g->max_y); + break; + case 3: // dispose to previous + if (g->old_out) { + for (i = g->start_y; i < g->max_y; i += 4 * g->w) + memcpy(&g->out[i + g->start_x], &g->old_out[i + g->start_x], g->max_x - g->start_x); + } + break; + } + + for (;;) { + switch (stbi__get8(s)) { + case 0x2C: /* Image Descriptor */ + { + int prev_trans = -1; + stbi__int32 x, y, w, h; + stbi_uc *o; + + x = stbi__get16le(s); + y = stbi__get16le(s); + w = stbi__get16le(s); + h = stbi__get16le(s); + if (((x + w) > (g->w)) || ((y + h) > (g->h))) + return stbi__errpuc("bad Image Descriptor", "Corrupt GIF"); + + g->line_size = g->w * 4; + g->start_x = x * 4; + g->start_y = y * g->line_size; + g->max_x = g->start_x + w * 4; + g->max_y = g->start_y + h * g->line_size; + g->cur_x = g->start_x; + g->cur_y = g->start_y; + + g->lflags = stbi__get8(s); + + if (g->lflags & 0x40) { + g->step = 8 * g->line_size; // first interlaced spacing + g->parse = 3; + } else { + g->step = g->line_size; + g->parse = 0; + } + + if (g->lflags & 0x80) { + stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? 
g->transparent : -1); + g->color_table = (stbi_uc *) g->lpal; + } else if (g->flags & 0x80) { + if (g->transparent >= 0 && (g->eflags & 0x01)) { + prev_trans = g->pal[g->transparent][3]; + g->pal[g->transparent][3] = 0; + } + g->color_table = (stbi_uc *) g->pal; + } else + return stbi__errpuc("missing color table", "Corrupt GIF"); + + o = stbi__process_gif_raster(s, g); + if (o == NULL) return NULL; + + if (prev_trans != -1) + g->pal[g->transparent][3] = (stbi_uc) prev_trans; + + return o; + } + + case 0x21: // Comment Extension. + { + int len; + if (stbi__get8(s) == 0xF9) { // Graphic Control Extension. + len = stbi__get8(s); + if (len == 4) { + g->eflags = stbi__get8(s); + g->delay = stbi__get16le(s); + g->transparent = stbi__get8(s); + } else { + stbi__skip(s, len); + break; + } + } + while ((len = stbi__get8(s)) != 0) + stbi__skip(s, len); + break; + } + + case 0x3B: // gif stream termination code + return (stbi_uc *) s; // using '1' causes warning on some compilers + + default: + return stbi__errpuc("unknown code", "Corrupt GIF"); + } + } + + STBI_NOTUSED(req_comp); +} + +static stbi_uc *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi_uc *u = 0; + stbi__gif g; + memset(&g, 0, sizeof(g)); + + u = stbi__gif_load_next(s, &g, comp, req_comp); + if (u == (stbi_uc *) s) u = 0; // end of animated gif marker + if (u) { + *x = g.w; + *y = g.h; + if (req_comp && req_comp != 4) + u = stbi__convert_format(u, 4, req_comp, g.w, g.h); + } + else if (g.out) + STBI_FREE(g.out); + + return u; +} + +static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp) +{ + return stbi__gif_info_raw(s,x,y,comp); +} +#endif + +// ************************************************************************************************* +// Radiance RGBE HDR loader +// originally by Nicolas Schulz +#ifndef STBI_NO_HDR +static int stbi__hdr_test_core(stbi__context *s) +{ + const char *signature = "#?RADIANCE\n"; + int i; + for (i=0; signature[i]; ++i) + if (stbi__get8(s) != signature[i]) + return 0; + return 1; +} + +static int stbi__hdr_test(stbi__context* s) +{ + int r = stbi__hdr_test_core(s); + stbi__rewind(s); + return r; +} + +#define STBI__HDR_BUFLEN 1024 +static char *stbi__hdr_gettoken(stbi__context *z, char *buffer) +{ + int len=0; + char c = '\0'; + + c = (char) stbi__get8(z); + + while (!stbi__at_eof(z) && c != '\n') { + buffer[len++] = c; + if (len == STBI__HDR_BUFLEN-1) { + // flush to end of line + while (!stbi__at_eof(z) && stbi__get8(z) != '\n') + ; + break; + } + c = (char) stbi__get8(z); + } + + buffer[len] = 0; + return buffer; +} + +static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp) +{ + if ( input[3] != 0 ) { + float f1; + // Exponent + f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8)); + if (req_comp <= 2) + output[0] = (input[0] + input[1] + input[2]) * f1 / 3; + else { + output[0] = input[0] * f1; + output[1] = input[1] * f1; + output[2] = input[2] * f1; + } + if (req_comp == 2) output[1] = 1; + if (req_comp == 4) output[3] = 1; + } else { + switch (req_comp) { + case 4: output[3] = 1; /* fallthrough */ + case 3: output[0] = output[1] = output[2] = 0; + break; + case 2: output[1] = 1; /* fallthrough */ + case 1: output[0] = 0; + break; + } + } +} + +static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + int width, height; + stbi_uc *scanline; + float *hdr_data; + int len; + unsigned char count, value; + int i, j, k, c1,c2, z; + + 
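// [Editor's note -- illustrative comment, not part of upstream stb_image.h]
// Worked example of the RGBE decoding performed by stbi__hdr_convert() above:
// each HDR pixel is four bytes (r, g, b, e), and a non-zero exponent byte e
// scales every mantissa byte by ldexp(1.0f, e - (128 + 8)) = 2^(e - 136).
// For instance the pixel (128, 64, 32, 129) has scale 2^(129 - 136) = 1/128 and
// decodes to the linear floats (1.0, 0.5, 0.25); a pixel with e == 0 decodes to black.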
+ // Check identifier + if (strcmp(stbi__hdr_gettoken(s,buffer), "#?RADIANCE") != 0) + return stbi__errpf("not HDR", "Corrupt HDR image"); + + // Parse header + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format"); + + // Parse width and height + // can't use sscanf() if we're not using stdio! + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + height = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format"); + token += 3; + width = (int) strtol(token, NULL, 10); + + *x = width; + *y = height; + + if (comp) *comp = 3; + if (req_comp == 0) req_comp = 3; + + // Read data + hdr_data = (float *) stbi__malloc(height * width * req_comp * sizeof(float)); + + // Load image data + // image data is stored as some number of sca + if ( width < 8 || width >= 32768) { + // Read flat data + for (j=0; j < height; ++j) { + for (i=0; i < width; ++i) { + stbi_uc rgbe[4]; + main_decode_loop: + stbi__getn(s, rgbe, 4); + stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp); + } + } + } else { + // Read RLE-encoded data + scanline = NULL; + + for (j = 0; j < height; ++j) { + c1 = stbi__get8(s); + c2 = stbi__get8(s); + len = stbi__get8(s); + if (c1 != 2 || c2 != 2 || (len & 0x80)) { + // not run-length encoded, so we have to actually use THIS data as a decoded + // pixel (note this can't be a valid pixel--one of RGB must be >= 128) + stbi_uc rgbe[4]; + rgbe[0] = (stbi_uc) c1; + rgbe[1] = (stbi_uc) c2; + rgbe[2] = (stbi_uc) len; + rgbe[3] = (stbi_uc) stbi__get8(s); + stbi__hdr_convert(hdr_data, rgbe, req_comp); + i = 1; + j = 0; + STBI_FREE(scanline); + goto main_decode_loop; // yes, this makes no sense + } + len <<= 8; + len |= stbi__get8(s); + if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); } + if (scanline == NULL) scanline = (stbi_uc *) stbi__malloc(width * 4); + + for (k = 0; k < 4; ++k) { + i = 0; + while (i < width) { + count = stbi__get8(s); + if (count > 128) { + // Run + value = stbi__get8(s); + count -= 128; + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = value; + } else { + // Dump + for (z = 0; z < count; ++z) + scanline[i++ * 4 + k] = stbi__get8(s); + } + } + } + for (i=0; i < width; ++i) + stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp); + } + STBI_FREE(scanline); + } + + return hdr_data; +} + +static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp) +{ + char buffer[STBI__HDR_BUFLEN]; + char *token; + int valid = 0; + + if (stbi__hdr_test(s) == 0) { + stbi__rewind( s ); + return 0; + } + + for(;;) { + token = stbi__hdr_gettoken(s,buffer); + if (token[0] == 0) break; + if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1; + } + + if (!valid) { + stbi__rewind( s ); + return 0; + } + token = stbi__hdr_gettoken(s,buffer); + if (strncmp(token, "-Y ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *y = (int) strtol(token, &token, 10); + while (*token == ' ') ++token; + if (strncmp(token, "+X ", 3)) { + stbi__rewind( s ); + return 0; + } + token += 3; + *x = (int) strtol(token, NULL, 10); + *comp = 3; + return 1; +} +#endif // 
STBI_NO_HDR + +#ifndef STBI_NO_BMP +static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp) +{ + void *p; + stbi__bmp_data info; + + info.all_a = 255; + p = stbi__bmp_parse_header(s, &info); + stbi__rewind( s ); + if (p == NULL) + return 0; + *x = s->img_x; + *y = s->img_y; + *comp = info.ma ? 4 : 3; + return 1; +} +#endif + +#ifndef STBI_NO_PSD +static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp) +{ + int channelCount; + if (stbi__get32be(s) != 0x38425053) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 1) { + stbi__rewind( s ); + return 0; + } + stbi__skip(s, 6); + channelCount = stbi__get16be(s); + if (channelCount < 0 || channelCount > 16) { + stbi__rewind( s ); + return 0; + } + *y = stbi__get32be(s); + *x = stbi__get32be(s); + if (stbi__get16be(s) != 8) { + stbi__rewind( s ); + return 0; + } + if (stbi__get16be(s) != 3) { + stbi__rewind( s ); + return 0; + } + *comp = 4; + return 1; +} +#endif + +#ifndef STBI_NO_PIC +static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp) +{ + int act_comp=0,num_packets=0,chained; + stbi__pic_packet packets[10]; + + if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) { + stbi__rewind(s); + return 0; + } + + stbi__skip(s, 88); + + *x = stbi__get16be(s); + *y = stbi__get16be(s); + if (stbi__at_eof(s)) { + stbi__rewind( s); + return 0; + } + if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) { + stbi__rewind( s ); + return 0; + } + + stbi__skip(s, 8); + + do { + stbi__pic_packet *packet; + + if (num_packets==sizeof(packets)/sizeof(packets[0])) + return 0; + + packet = &packets[num_packets++]; + chained = stbi__get8(s); + packet->size = stbi__get8(s); + packet->type = stbi__get8(s); + packet->channel = stbi__get8(s); + act_comp |= packet->channel; + + if (stbi__at_eof(s)) { + stbi__rewind( s ); + return 0; + } + if (packet->size != 8) { + stbi__rewind( s ); + return 0; + } + } while (chained); + + *comp = (act_comp & 0x10 ? 
4 : 3); + + return 1; +} +#endif + +// ************************************************************************************************* +// Portable Gray Map and Portable Pixel Map loader +// by Ken Miller +// +// PGM: http://netpbm.sourceforge.net/doc/pgm.html +// PPM: http://netpbm.sourceforge.net/doc/ppm.html +// +// Known limitations: +// Does not support comments in the header section +// Does not support ASCII image data (formats P2 and P3) +// Does not support 16-bit-per-channel + +#ifndef STBI_NO_PNM + +static int stbi__pnm_test(stbi__context *s) +{ + char p, t; + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + return 1; +} + +static stbi_uc *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp) +{ + stbi_uc *out; + if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n)) + return 0; + *x = s->img_x; + *y = s->img_y; + *comp = s->img_n; + + out = (stbi_uc *) stbi__malloc(s->img_n * s->img_x * s->img_y); + if (!out) return stbi__errpuc("outofmem", "Out of memory"); + stbi__getn(s, out, s->img_n * s->img_x * s->img_y); + + if (req_comp && req_comp != s->img_n) { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (out == NULL) return out; // stbi__convert_format frees input on failure + } + return out; +} + +static int stbi__pnm_isspace(char c) +{ + return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r'; +} + +static void stbi__pnm_skip_whitespace(stbi__context *s, char *c) +{ + for (;;) { + while (!stbi__at_eof(s) && stbi__pnm_isspace(*c)) + *c = (char) stbi__get8(s); + + if (stbi__at_eof(s) || *c != '#') + break; + + while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' ) + *c = (char) stbi__get8(s); + } +} + +static int stbi__pnm_isdigit(char c) +{ + return c >= '0' && c <= '9'; +} + +static int stbi__pnm_getinteger(stbi__context *s, char *c) +{ + int value = 0; + + while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { + value = value*10 + (*c - '0'); + *c = (char) stbi__get8(s); + } + + return value; +} + +static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) +{ + int maxv; + char c, p, t; + + stbi__rewind( s ); + + // Get identifier + p = (char) stbi__get8(s); + t = (char) stbi__get8(s); + if (p != 'P' || (t != '5' && t != '6')) { + stbi__rewind( s ); + return 0; + } + + *comp = (t == '6') ? 
3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm + + c = (char) stbi__get8(s); + stbi__pnm_skip_whitespace(s, &c); + + *x = stbi__pnm_getinteger(s, &c); // read width + stbi__pnm_skip_whitespace(s, &c); + + *y = stbi__pnm_getinteger(s, &c); // read height + stbi__pnm_skip_whitespace(s, &c); + + maxv = stbi__pnm_getinteger(s, &c); // read max value + + if (maxv > 255) + return stbi__err("max value > 255", "PPM image not 8-bit"); + else + return 1; +} +#endif + +static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp) +{ + #ifndef STBI_NO_JPEG + if (stbi__jpeg_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNG + if (stbi__png_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_GIF + if (stbi__gif_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_BMP + if (stbi__bmp_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PSD + if (stbi__psd_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PIC + if (stbi__pic_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_PNM + if (stbi__pnm_info(s, x, y, comp)) return 1; + #endif + + #ifndef STBI_NO_HDR + if (stbi__hdr_info(s, x, y, comp)) return 1; + #endif + + // test tga last because it's a crappy test! + #ifndef STBI_NO_TGA + if (stbi__tga_info(s, x, y, comp)) + return 1; + #endif + return stbi__err("unknown image type", "Image not of any known type, or corrupt"); +} + +#ifndef STBI_NO_STDIO +STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp) +{ + FILE *f = stbi__fopen(filename, "rb"); + int result; + if (!f) return stbi__err("can't fopen", "Unable to open file"); + result = stbi_info_from_file(f, x, y, comp); + fclose(f); + return result; +} + +STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp) +{ + int r; + stbi__context s; + long pos = ftell(f); + stbi__start_file(&s, f); + r = stbi__info_main(&s,x,y,comp); + fseek(f,pos,SEEK_SET); + return r; +} +#endif // !STBI_NO_STDIO + +STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_mem(&s,buffer,len); + return stbi__info_main(&s,x,y,comp); +} + +STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp) +{ + stbi__context s; + stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user); + return stbi__info_main(&s,x,y,comp); +} + +#endif // STB_IMAGE_IMPLEMENTATION + +/* + revision history: + 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED + 2.09 (2016-01-16) allow comments in PNM files + 16-bit-per-pixel TGA (not bit-per-component) + info() for TGA could break due to .hdr handling + info() for BMP to shares code instead of sloppy parse + can use STBI_REALLOC_SIZED if allocator doesn't support realloc + code cleanup + 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA + 2.07 (2015-09-13) fix compiler warnings + partial animated GIF support + limited 16-bpc PSD support + #ifdef unused functions + bug with < 92 byte PIC,PNM,HDR,TGA + 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value + 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning + 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit + 2.03 (2015-04-12) extra corruption checking (mmozeiko) + stbi_set_flip_vertically_on_load (nguillemot) + fix NEON support; fix mingw support + 2.02 (2015-01-19) fix incorrect assert, fix warning + 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2 + 2.00b (2014-12-25) fix STBI_MALLOC in 
progressive JPEG + 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg) + progressive JPEG (stb) + PGM/PPM support (Ken Miller) + STBI_MALLOC,STBI_REALLOC,STBI_FREE + GIF bugfix -- seemingly never worked + STBI_NO_*, STBI_ONLY_* + 1.48 (2014-12-14) fix incorrectly-named assert() + 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb) + optimize PNG (ryg) + fix bug in interlaced PNG with user-specified channel count (stb) + 1.46 (2014-08-26) + fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG + 1.45 (2014-08-16) + fix MSVC-ARM internal compiler error by wrapping malloc + 1.44 (2014-08-07) + various warning fixes from Ronny Chevalier + 1.43 (2014-07-15) + fix MSVC-only compiler problem in code changed in 1.42 + 1.42 (2014-07-09) + don't define _CRT_SECURE_NO_WARNINGS (affects user code) + fixes to stbi__cleanup_jpeg path + added STBI_ASSERT to avoid requiring assert.h + 1.41 (2014-06-25) + fix search&replace from 1.36 that messed up comments/error messages + 1.40 (2014-06-22) + fix gcc struct-initialization warning + 1.39 (2014-06-15) + fix to TGA optimization when req_comp != number of components in TGA; + fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite) + add support for BMP version 5 (more ignored fields) + 1.38 (2014-06-06) + suppress MSVC warnings on integer casts truncating values + fix accidental rename of 'skip' field of I/O + 1.37 (2014-06-04) + remove duplicate typedef + 1.36 (2014-06-03) + convert to header file single-file library + if de-iphone isn't set, load iphone images color-swapped instead of returning NULL + 1.35 (2014-05-27) + various warnings + fix broken STBI_SIMD path + fix bug where stbi_load_from_file no longer left file pointer in correct place + fix broken non-easy path for 32-bit BMP (possibly never used) + TGA optimization by Arseny Kapoulkine + 1.34 (unknown) + use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case + 1.33 (2011-07-14) + make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements + 1.32 (2011-07-13) + support for "info" function for all supported filetypes (SpartanJ) + 1.31 (2011-06-20) + a few more leak fixes, bug in PNG handling (SpartanJ) + 1.30 (2011-06-11) + added ability to load files via callbacks to accomidate custom input streams (Ben Wenger) + removed deprecated format-specific test/load functions + removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway + error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha) + fix inefficiency in decoding 32-bit BMP (David Woo) + 1.29 (2010-08-16) + various warning fixes from Aurelien Pocheville + 1.28 (2010-08-01) + fix bug in GIF palette transparency (SpartanJ) + 1.27 (2010-08-01) + cast-to-stbi_uc to fix warnings + 1.26 (2010-07-24) + fix bug in file buffering for PNG reported by SpartanJ + 1.25 (2010-07-17) + refix trans_data warning (Won Chun) + 1.24 (2010-07-12) + perf improvements reading from files on platforms with lock-heavy fgetc() + minor perf improvements for jpeg + deprecated type-specific functions so we'll get feedback if they're needed + attempt to fix trans_data warning (Won Chun) + 1.23 fixed bug in iPhone support + 1.22 (2010-07-10) + removed image *writing* support + stbi_info support from Jetro Lauha + GIF support from Jean-Marc Lienher + iPhone PNG-extensions from James Brown + warning-fixes from Nicolas Schulz and Janez Zemva 
(i.stbi__err. Janez (U+017D)emva) + 1.21 fix use of 'stbi_uc' in header (reported by jon blow) + 1.20 added support for Softimage PIC, by Tom Seddon + 1.19 bug in interlaced PNG corruption check (found by ryg) + 1.18 (2008-08-02) + fix a threading bug (local mutable static) + 1.17 support interlaced PNG + 1.16 major bugfix - stbi__convert_format converted one too many pixels + 1.15 initialize some fields for thread safety + 1.14 fix threadsafe conversion bug + header-file-only version (#define STBI_HEADER_FILE_ONLY before including) + 1.13 threadsafe + 1.12 const qualifiers in the API + 1.11 Support installable IDCT, colorspace conversion routines + 1.10 Fixes for 64-bit (don't use "unsigned long") + optimized upsampling by Fabian "ryg" Giesen + 1.09 Fix format-conversion for PSD code (bad global variables!) + 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz + 1.07 attempt to fix C++ warning/errors again + 1.06 attempt to fix C++ warning/errors again + 1.05 fix TGA loading to return correct *comp and use good luminance calc + 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free + 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR + 1.02 support for (subset of) HDR files, float interface for preferred access to them + 1.01 fix bug: possible bug in handling right-side up bmps... not sure + fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all + 1.00 interface to zlib that skips zlib header + 0.99 correct handling of alpha in palette + 0.98 TGA loader by lonesock; dynamically add loaders (untested) + 0.97 jpeg errors on too large a file; also catch another malloc failure + 0.96 fix detection of invalid v value - particleman@mollyrocket forum + 0.95 during header scan, seek to markers in case of padding + 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same + 0.93 handle jpegtran output; verbose errors + 0.92 read 4,8,16,24,32-bit BMP files of several formats + 0.91 output 24-bit Windows 3.0 BMP files + 0.90 fix a few more warnings; bump version number to approach 1.0 + 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd + 0.60 fix compiling as c++ + 0.59 fix warnings: merge Dave Moore's -Wall fixes + 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian + 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available + 0.56 fix bug: zlib uncompressed mode len vs. nlen + 0.55 fix bug: restart_interval not initialized to 0 + 0.54 allow NULL for 'int *comp' + 0.53 fix bug in png 3->4; speedup png decoding + 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments + 0.51 obey req_comp requests, 1-component jpegs return as 1-component, + on 'test' only check type, not whether we support this variant + 0.50 (2006-11-19) + first released version +*/ diff --git a/troposphere/daybreak/nanovg/include/nanovg/stb_truetype.h b/troposphere/daybreak/nanovg/include/nanovg/stb_truetype.h new file mode 100644 index 000000000..52e1c9461 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg/stb_truetype.h @@ -0,0 +1,5011 @@ +// stb_truetype.h - v1.24 - public domain +// authored from 2009-2020 by Sean Barrett / RAD Game Tools +// +// ======================================================================= +// +// NO SECURITY GUARANTEE -- DO NOT USE THIS ON UNTRUSTED FONT FILES +// +// This library does no range checking of the offsets found in the file, +// meaning an attacker can use it to read arbitrary memory. 
+// +// ======================================================================= +// +// This library processes TrueType files: +// parse files +// extract glyph metrics +// extract glyph shapes +// render glyphs to one-channel bitmaps with antialiasing (box filter) +// render glyphs to one-channel SDF bitmaps (signed-distance field/function) +// +// Todo: +// non-MS cmaps +// crashproof on bad data +// hinting? (no longer patented) +// cleartype-style AA? +// optimize: use simple memory allocator for intermediates +// optimize: build edge-list directly from curves +// optimize: rasterize directly from curves? +// +// ADDITIONAL CONTRIBUTORS +// +// Mikko Mononen: compound shape support, more cmap formats +// Tor Andersson: kerning, subpixel rendering +// Dougall Johnson: OpenType / Type 2 font handling +// Daniel Ribeiro Maciel: basic GPOS-based kerning +// +// Misc other: +// Ryan Gordon +// Simon Glass +// github:IntellectualKitty +// Imanol Celaya +// Daniel Ribeiro Maciel +// +// Bug/warning reports/fixes: +// "Zer" on mollyrocket Fabian "ryg" Giesen github:NiLuJe +// Cass Everitt Martins Mozeiko github:aloucks +// stoiko (Haemimont Games) Cap Petschulat github:oyvindjam +// Brian Hook Omar Cornut github:vassvik +// Walter van Niftrik Ryan Griege +// David Gow Peter LaValle +// David Given Sergey Popov +// Ivan-Assen Ivanov Giumo X. Clanjor +// Anthony Pesch Higor Euripedes +// Johan Duparc Thomas Fields +// Hou Qiming Derek Vinyard +// Rob Loach Cort Stratton +// Kenney Phillis Jr. Brian Costabile +// Ken Voskuil (kaesve) +// +// VERSION HISTORY +// +// 1.24 (2020-02-05) fix warning +// 1.23 (2020-02-02) query SVG data for glyphs; query whole kerning table (but only kern not GPOS) +// 1.22 (2019-08-11) minimize missing-glyph duplication; fix kerning if both 'GPOS' and 'kern' are defined +// 1.21 (2019-02-25) fix warning +// 1.20 (2019-02-07) PackFontRange skips missing codepoints; GetScaleFontVMetrics() +// 1.19 (2018-02-11) GPOS kerning, STBTT_fmod +// 1.18 (2018-01-29) add missing function +// 1.17 (2017-07-23) make more arguments const; doc fix +// 1.16 (2017-07-12) SDF support +// 1.15 (2017-03-03) make more arguments const +// 1.14 (2017-01-16) num-fonts-in-TTC function +// 1.13 (2017-01-02) support OpenType fonts, certain Apple fonts +// 1.12 (2016-10-25) suppress warnings about casting away const with -Wcast-qual +// 1.11 (2016-04-02) fix unused-variable warning +// 1.10 (2016-04-02) user-defined fabs(); rare memory leak; remove duplicate typedef +// 1.09 (2016-01-16) warning fix; avoid crash on outofmem; use allocation userdata properly +// 1.08 (2015-09-13) document stbtt_Rasterize(); fixes for vertical & horizontal edges +// 1.07 (2015-08-01) allow PackFontRanges to accept arrays of sparse codepoints; +// variant PackFontRanges to pack and render in separate phases; +// fix stbtt_GetFontOFfsetForIndex (never worked for non-0 input?); +// fixed an assert() bug in the new rasterizer +// replace assert() with STBTT_assert() in new rasterizer +// +// Full history can be found at the end of this file. +// +// LICENSE +// +// See end of file for license information. +// +// USAGE +// +// Include this file in whatever places need to refer to it. In ONE C/C++ +// file, write: +// #define STB_TRUETYPE_IMPLEMENTATION +// before the #include of this file. This expands out the actual +// implementation into that C/C++ file. 
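// [Editor's note -- illustrative snippet, not part of upstream stb_truetype.h]
// A minimal sketch of the usage rule stated above: exactly one translation unit
// in the consuming project (a hypothetical stb_impl.c here) generates the
// implementation, and every other file just includes the header for declarations.
#if 0
/* stb_impl.c -- hypothetical file name */
#define STB_TRUETYPE_IMPLEMENTATION
#include "stb_truetype.h"

/* any_other_file.c */
#include "stb_truetype.h"   /* declarations only */
#endif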
+// +// To make the implementation private to the file that generates the implementation, +// #define STBTT_STATIC +// +// Simple 3D API (don't ship this, but it's fine for tools and quick start) +// stbtt_BakeFontBitmap() -- bake a font to a bitmap for use as texture +// stbtt_GetBakedQuad() -- compute quad to draw for a given char +// +// Improved 3D API (more shippable): +// #include "stb_rect_pack.h" -- optional, but you really want it +// stbtt_PackBegin() +// stbtt_PackSetOversampling() -- for improved quality on small fonts +// stbtt_PackFontRanges() -- pack and renders +// stbtt_PackEnd() +// stbtt_GetPackedQuad() +// +// "Load" a font file from a memory buffer (you have to keep the buffer loaded) +// stbtt_InitFont() +// stbtt_GetFontOffsetForIndex() -- indexing for TTC font collections +// stbtt_GetNumberOfFonts() -- number of fonts for TTC font collections +// +// Render a unicode codepoint to a bitmap +// stbtt_GetCodepointBitmap() -- allocates and returns a bitmap +// stbtt_MakeCodepointBitmap() -- renders into bitmap you provide +// stbtt_GetCodepointBitmapBox() -- how big the bitmap must be +// +// Character advance/positioning +// stbtt_GetCodepointHMetrics() +// stbtt_GetFontVMetrics() +// stbtt_GetFontVMetricsOS2() +// stbtt_GetCodepointKernAdvance() +// +// Starting with version 1.06, the rasterizer was replaced with a new, +// faster and generally-more-precise rasterizer. The new rasterizer more +// accurately measures pixel coverage for anti-aliasing, except in the case +// where multiple shapes overlap, in which case it overestimates the AA pixel +// coverage. Thus, anti-aliasing of intersecting shapes may look wrong. If +// this turns out to be a problem, you can re-enable the old rasterizer with +// #define STBTT_RASTERIZER_VERSION 1 +// which will incur about a 15% speed hit. +// +// ADDITIONAL DOCUMENTATION +// +// Immediately after this block comment are a series of sample programs. +// +// After the sample programs is the "header file" section. This section +// includes documentation for each API function. +// +// Some important concepts to understand to use this library: +// +// Codepoint +// Characters are defined by unicode codepoints, e.g. 65 is +// uppercase A, 231 is lowercase c with a cedilla, 0x7e30 is +// the hiragana for "ma". +// +// Glyph +// A visual character shape (every codepoint is rendered as +// some glyph) +// +// Glyph index +// A font-specific integer ID representing a glyph +// +// Baseline +// Glyph shapes are defined relative to a baseline, which is the +// bottom of uppercase characters. Characters extend both above +// and below the baseline. +// +// Current Point +// As you draw text to the screen, you keep track of a "current point" +// which is the origin of each character. The current point's vertical +// position is the baseline. Even "baked fonts" use this model. +// +// Vertical Font Metrics +// The vertical qualities of the font, used to vertically position +// and space the characters. See docs for stbtt_GetFontVMetrics. +// +// Font Size in Pixels or Points +// The preferred interface for specifying font sizes in stb_truetype +// is to specify how tall the font's vertical extent should be in pixels. +// If that sounds good enough, skip the next paragraph. +// +// Most font APIs instead use "points", which are a common typographic +// measurement for describing font size, defined as 72 points per inch. +// stb_truetype provides a point API for compatibility. 
However, true +// "per inch" conventions don't make much sense on computer displays +// since different monitors have different number of pixels per +// inch. For example, Windows traditionally uses a convention that +// there are 96 pixels per inch, thus making 'inch' measurements have +// nothing to do with inches, and thus effectively defining a point to +// be 1.333 pixels. Additionally, the TrueType font data provides +// an explicit scale factor to scale a given font's glyphs to points, +// but the author has observed that this scale factor is often wrong +// for non-commercial fonts, thus making fonts scaled in points +// according to the TrueType spec incoherently sized in practice. +// +// DETAILED USAGE: +// +// Scale: +// Select how high you want the font to be, in points or pixels. +// Call ScaleForPixelHeight or ScaleForMappingEmToPixels to compute +// a scale factor SF that will be used by all other functions. +// +// Baseline: +// You need to select a y-coordinate that is the baseline of where +// your text will appear. Call GetFontBoundingBox to get the baseline-relative +// bounding box for all characters. SF*-y0 will be the distance in pixels +// that the worst-case character could extend above the baseline, so if +// you want the top edge of characters to appear at the top of the +// screen where y=0, then you would set the baseline to SF*-y0. +// +// Current point: +// Set the current point where the first character will appear. The +// first character could extend left of the current point; this is font +// dependent. You can either choose a current point that is the leftmost +// point and hope, or add some padding, or check the bounding box or +// left-side-bearing of the first character to be displayed and set +// the current point based on that. +// +// Displaying a character: +// Compute the bounding box of the character. It will contain signed values +// relative to . I.e. if it returns x0,y0,x1,y1, +// then the character should be displayed in the rectangle from +// to = 32 && *text < 128) { + stbtt_aligned_quad q; + stbtt_GetBakedQuad(cdata, 512,512, *text-32, &x,&y,&q,1);//1=opengl & d3d10+,0=d3d9 + glTexCoord2f(q.s0,q.t1); glVertex2f(q.x0,q.y0); + glTexCoord2f(q.s1,q.t1); glVertex2f(q.x1,q.y0); + glTexCoord2f(q.s1,q.t0); glVertex2f(q.x1,q.y1); + glTexCoord2f(q.s0,q.t0); glVertex2f(q.x0,q.y1); + } + ++text; + } + glEnd(); +} +#endif +// +// +////////////////////////////////////////////////////////////////////////////// +// +// Complete program (this compiles): get a single bitmap, print as ASCII art +// +#if 0 +#include +#define STB_TRUETYPE_IMPLEMENTATION // force following include to generate implementation +#include "stb_truetype.h" + +char ttf_buffer[1<<25]; + +int main(int argc, char **argv) +{ + stbtt_fontinfo font; + unsigned char *bitmap; + int w,h,i,j,c = (argc > 1 ? atoi(argv[1]) : 'a'), s = (argc > 2 ? atoi(argv[2]) : 20); + + fread(ttf_buffer, 1, 1<<25, fopen(argc > 3 ? argv[3] : "c:/windows/fonts/arialbd.ttf", "rb")); + + stbtt_InitFont(&font, ttf_buffer, stbtt_GetFontOffsetForIndex(ttf_buffer,0)); + bitmap = stbtt_GetCodepointBitmap(&font, 0,stbtt_ScaleForPixelHeight(&font, s), c, &w, &h, 0,0); + + for (j=0; j < h; ++j) { + for (i=0; i < w; ++i) + putchar(" .:ioVM@"[bitmap[j*w+i]>>5]); + putchar('\n'); + } + return 0; +} +#endif +// +// Output: +// +// .ii. +// @@@@@@. +// V@Mio@@o +// :i. V@V +// :oM@@M +// :@@@MM@M +// @@o o@M +// :@@. M@M +// @@@o@@@@ +// :M@@V:@@. 
+// +////////////////////////////////////////////////////////////////////////////// +// +// Complete program: print "Hello World!" banner, with bugs +// +#if 0 +char buffer[24<<20]; +unsigned char screen[20][79]; + +int main(int arg, char **argv) +{ + stbtt_fontinfo font; + int i,j,ascent,baseline,ch=0; + float scale, xpos=2; // leave a little padding in case the character extends left + char *text = "Heljo World!"; // intentionally misspelled to show 'lj' brokenness + + fread(buffer, 1, 1000000, fopen("c:/windows/fonts/arialbd.ttf", "rb")); + stbtt_InitFont(&font, buffer, 0); + + scale = stbtt_ScaleForPixelHeight(&font, 15); + stbtt_GetFontVMetrics(&font, &ascent,0,0); + baseline = (int) (ascent*scale); + + while (text[ch]) { + int advance,lsb,x0,y0,x1,y1; + float x_shift = xpos - (float) floor(xpos); + stbtt_GetCodepointHMetrics(&font, text[ch], &advance, &lsb); + stbtt_GetCodepointBitmapBoxSubpixel(&font, text[ch], scale,scale,x_shift,0, &x0,&y0,&x1,&y1); + stbtt_MakeCodepointBitmapSubpixel(&font, &screen[baseline + y0][(int) xpos + x0], x1-x0,y1-y0, 79, scale,scale,x_shift,0, text[ch]); + // note that this stomps the old data, so where character boxes overlap (e.g. 'lj') it's wrong + // because this API is really for baking character bitmaps into textures. if you want to render + // a sequence of characters, you really need to render each bitmap to a temp buffer, then + // "alpha blend" that into the working buffer + xpos += (advance * scale); + if (text[ch+1]) + xpos += scale*stbtt_GetCodepointKernAdvance(&font, text[ch],text[ch+1]); + ++ch; + } + + for (j=0; j < 20; ++j) { + for (i=0; i < 78; ++i) + putchar(" .:ioVM@"[screen[j][i]>>5]); + putchar('\n'); + } + + return 0; +} +#endif + + +////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////// +//// +//// INTEGRATION WITH YOUR CODEBASE +//// +//// The following sections allow you to supply alternate definitions +//// of C library functions used by stb_truetype, e.g. if you don't +//// link with the C runtime library. + +#ifdef STB_TRUETYPE_IMPLEMENTATION + // #define your own (u)stbtt_int8/16/32 before including to override this + #ifndef stbtt_uint8 + typedef unsigned char stbtt_uint8; + typedef signed char stbtt_int8; + typedef unsigned short stbtt_uint16; + typedef signed short stbtt_int16; + typedef unsigned int stbtt_uint32; + typedef signed int stbtt_int32; + #endif + + typedef char stbtt__check_size32[sizeof(stbtt_int32)==4 ? 1 : -1]; + typedef char stbtt__check_size16[sizeof(stbtt_int16)==2 ? 1 : -1]; + + // e.g. 
#define your own STBTT_ifloor/STBTT_iceil() to avoid math.h
+   #ifndef STBTT_ifloor
+   #include <math.h>
+   #define STBTT_ifloor(x)   ((int) floor(x))
+   #define STBTT_iceil(x)    ((int) ceil(x))
+   #endif
+
+   #ifndef STBTT_sqrt
+   #include <math.h>
+   #define STBTT_sqrt(x)      sqrt(x)
+   #define STBTT_pow(x,y)     pow(x,y)
+   #endif
+
+   #ifndef STBTT_fmod
+   #include <math.h>
+   #define STBTT_fmod(x,y)    fmod(x,y)
+   #endif
+
+   #ifndef STBTT_cos
+   #include <math.h>
+   #define STBTT_cos(x)       cos(x)
+   #define STBTT_acos(x)      acos(x)
+   #endif
+
+   #ifndef STBTT_fabs
+   #include <math.h>
+   #define STBTT_fabs(x)      fabs(x)
+   #endif
+
+   // #define your own functions "STBTT_malloc" / "STBTT_free" to avoid malloc.h
+   #ifndef STBTT_malloc
+   #include <stdlib.h>
+   #define STBTT_malloc(x,u)  ((void)(u),malloc(x))
+   #define STBTT_free(x,u)    ((void)(u),free(x))
+   #endif
+
+   #ifndef STBTT_assert
+   #include <assert.h>
+   #define STBTT_assert(x)    assert(x)
+   #endif
+
+   #ifndef STBTT_strlen
+   #include <string.h>
+   #define STBTT_strlen(x)    strlen(x)
+   #endif
+
+   #ifndef STBTT_memcpy
+   #include <string.h>
+   #define STBTT_memcpy       memcpy
+   #define STBTT_memset       memset
+   #endif
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+////
+////   INTERFACE
+////
+////
+
+#ifndef __STB_INCLUDE_STB_TRUETYPE_H__
+#define __STB_INCLUDE_STB_TRUETYPE_H__
+
+#ifdef STBTT_STATIC
+#define STBTT_DEF static
+#else
+#define STBTT_DEF extern
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// private structure
+typedef struct
+{
+   unsigned char *data;
+   int cursor;
+   int size;
+} stbtt__buf;
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// TEXTURE BAKING API
+//
+// If you use this API, you only have to call two functions ever.
+//
+
+typedef struct
+{
+   unsigned short x0,y0,x1,y1; // coordinates of bbox in bitmap
+   float xoff,yoff,xadvance;
+} stbtt_bakedchar;
+
+STBTT_DEF int stbtt_BakeFontBitmap(const unsigned char *data, int offset,  // font location (use offset=0 for plain .ttf)
+                                float pixel_height,                     // height of font in pixels
+                                unsigned char *pixels, int pw, int ph,  // bitmap to be filled in
+                                int first_char, int num_chars,          // characters to bake
+                                stbtt_bakedchar *chardata);             // you allocate this, it's num_chars long
+// if return is positive, the first unused row of the bitmap
+// if return is negative, returns the negative of the number of characters that fit
+// if return is 0, no characters fit and no rows were used
+// This uses a very crappy packing.
+
+typedef struct
+{
+   float x0,y0,s0,t0; // top-left
+   float x1,y1,s1,t1; // bottom-right
+} stbtt_aligned_quad;
+
+STBTT_DEF void stbtt_GetBakedQuad(const stbtt_bakedchar *chardata, int pw, int ph,  // same data as above
+                               int char_index,             // character to display
+                               float *xpos, float *ypos,   // pointers to current position in screen pixel space
+                               stbtt_aligned_quad *q,      // output: quad to draw
+                               int opengl_fillrule);       // true if opengl fill rule; false if DX9 or earlier
+// Call GetBakedQuad with char_index = 'character - first_char', and it
+// creates the quad you need to draw and advances the current position.
+//
+// The coordinate system used assumes y increases downwards.
+//
+// Characters will extend both above and below the current position;
+// see discussion of "BASELINE" above.
+//
+// It's inefficient; you might want to c&p it and optimize it.
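+//
+// A minimal, renderer-agnostic sketch of the two-call workflow described above.
+// It assumes a TTF already loaded into 'ttf_buffer' and a 512x512 single-channel
+// atlas 'bitmap' (both hypothetical names): bake ASCII 32..126 once, then walk a
+// string and let stbtt_GetBakedQuad advance the current point and hand back the
+// screen-space rectangle plus the matching texture-space rectangle.
+#if 0
+static stbtt_bakedchar baked[96];
+static unsigned char   bitmap[512*512];
+
+static void layout_string(const unsigned char *ttf_buffer, const char *text)
+{
+   float x = 0.0f, y = 0.0f; // current point; y is the baseline
+   stbtt_BakeFontBitmap(ttf_buffer, 0, 32.0f, bitmap, 512, 512, 32, 96, baked);
+   while (*text) {
+      if (*text >= 32 && *text < 128) {
+         stbtt_aligned_quad q;
+         stbtt_GetBakedQuad(baked, 512, 512, *text - 32, &x, &y, &q, 1);
+         // q.x0..q.x1 / q.y0..q.y1 is where to draw this character on screen;
+         // q.s0..q.s1 / q.t0..q.t1 is the region of 'bitmap' to sample from.
+      }
+      ++text;
+   }
+}
+#endif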
+ +STBTT_DEF void stbtt_GetScaledFontVMetrics(const unsigned char *fontdata, int index, float size, float *ascent, float *descent, float *lineGap); +// Query the font vertical metrics without having to create a font first. + + +////////////////////////////////////////////////////////////////////////////// +// +// NEW TEXTURE BAKING API +// +// This provides options for packing multiple fonts into one atlas, not +// perfectly but better than nothing. + +typedef struct +{ + unsigned short x0,y0,x1,y1; // coordinates of bbox in bitmap + float xoff,yoff,xadvance; + float xoff2,yoff2; +} stbtt_packedchar; + +typedef struct stbtt_pack_context stbtt_pack_context; +typedef struct stbtt_fontinfo stbtt_fontinfo; +#ifndef STB_RECT_PACK_VERSION +typedef struct stbrp_rect stbrp_rect; +#endif + +STBTT_DEF int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, int width, int height, int stride_in_bytes, int padding, void *alloc_context); +// Initializes a packing context stored in the passed-in stbtt_pack_context. +// Future calls using this context will pack characters into the bitmap passed +// in here: a 1-channel bitmap that is width * height. stride_in_bytes is +// the distance from one row to the next (or 0 to mean they are packed tightly +// together). "padding" is the amount of padding to leave between each +// character (normally you want '1' for bitmaps you'll use as textures with +// bilinear filtering). +// +// Returns 0 on failure, 1 on success. + +STBTT_DEF void stbtt_PackEnd (stbtt_pack_context *spc); +// Cleans up the packing context and frees all memory. + +#define STBTT_POINT_SIZE(x) (-(x)) + +STBTT_DEF int stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, float font_size, + int first_unicode_char_in_range, int num_chars_in_range, stbtt_packedchar *chardata_for_range); +// Creates character bitmaps from the font_index'th font found in fontdata (use +// font_index=0 if you don't know what that is). It creates num_chars_in_range +// bitmaps for characters with unicode values starting at first_unicode_char_in_range +// and increasing. Data for how to render them is stored in chardata_for_range; +// pass these to stbtt_GetPackedQuad to get back renderable quads. +// +// font_size is the full height of the character from ascender to descender, +// as computed by stbtt_ScaleForPixelHeight. To use a point size as computed +// by stbtt_ScaleForMappingEmToPixels, wrap the point size in STBTT_POINT_SIZE() +// and pass that result as 'font_size': +// ..., 20 , ... // font max minus min y is 20 pixels tall +// ..., STBTT_POINT_SIZE(20), ... // 'M' is 20 pixels tall + +typedef struct +{ + float font_size; + int first_unicode_codepoint_in_range; // if non-zero, then the chars are continuous, and this is the first codepoint + int *array_of_unicode_codepoints; // if non-zero, then this is an array of unicode codepoints + int num_chars; + stbtt_packedchar *chardata_for_range; // output + unsigned char h_oversample, v_oversample; // don't set these, they're used internally +} stbtt_pack_range; + +STBTT_DEF int stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, stbtt_pack_range *ranges, int num_ranges); +// Creates character bitmaps from multiple ranges of characters stored in +// ranges. This will usually create a better-packed bitmap than multiple +// calls to stbtt_PackFontRange. Note that you can call this multiple +// times within a single PackBegin/PackEnd. 
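+
+// A minimal sketch of the pack-context flow described above, assuming a TTF
+// already loaded into 'ttf_buffer' and names like 'atlas'/'packed' chosen only
+// for illustration. It packs ASCII 32..126 at 24px into a 512x512 atlas, using
+// stbtt_PackSetOversampling and stbtt_GetPackedQuad, which are declared just below.
+#if 0
+static stbtt_packedchar packed[96];
+static unsigned char    atlas[512*512];
+
+static int build_atlas(const unsigned char *ttf_buffer)
+{
+   stbtt_pack_context spc;
+   if (!stbtt_PackBegin(&spc, atlas, 512, 512, 0, 1, NULL))
+      return 0;                             // allocation failed
+   stbtt_PackSetOversampling(&spc, 2, 2);   // 2x2 oversampling for small text
+   if (!stbtt_PackFontRange(&spc, ttf_buffer, 0, 24.0f, 32, 96, packed)) {
+      stbtt_PackEnd(&spc);
+      return 0;                             // not every character fit
+   }
+   stbtt_PackEnd(&spc);
+   return 1;
+   // later, per character c in 32..126:
+   //    float x = ..., y = ...; stbtt_aligned_quad q;
+   //    stbtt_GetPackedQuad(packed, 512, 512, c - 32, &x, &y, &q, 0);
+}
+#endif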
+ +STBTT_DEF void stbtt_PackSetOversampling(stbtt_pack_context *spc, unsigned int h_oversample, unsigned int v_oversample); +// Oversampling a font increases the quality by allowing higher-quality subpixel +// positioning, and is especially valuable at smaller text sizes. +// +// This function sets the amount of oversampling for all following calls to +// stbtt_PackFontRange(s) or stbtt_PackFontRangesGatherRects for a given +// pack context. The default (no oversampling) is achieved by h_oversample=1 +// and v_oversample=1. The total number of pixels required is +// h_oversample*v_oversample larger than the default; for example, 2x2 +// oversampling requires 4x the storage of 1x1. For best results, render +// oversampled textures with bilinear filtering. Look at the readme in +// stb/tests/oversample for information about oversampled fonts +// +// To use with PackFontRangesGather etc., you must set it before calls +// call to PackFontRangesGatherRects. + +STBTT_DEF void stbtt_PackSetSkipMissingCodepoints(stbtt_pack_context *spc, int skip); +// If skip != 0, this tells stb_truetype to skip any codepoints for which +// there is no corresponding glyph. If skip=0, which is the default, then +// codepoints without a glyph recived the font's "missing character" glyph, +// typically an empty box by convention. + +STBTT_DEF void stbtt_GetPackedQuad(const stbtt_packedchar *chardata, int pw, int ph, // same data as above + int char_index, // character to display + float *xpos, float *ypos, // pointers to current position in screen pixel space + stbtt_aligned_quad *q, // output: quad to draw + int align_to_integer); + +STBTT_DEF int stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects); +STBTT_DEF void stbtt_PackFontRangesPackRects(stbtt_pack_context *spc, stbrp_rect *rects, int num_rects); +STBTT_DEF int stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects); +// Calling these functions in sequence is roughly equivalent to calling +// stbtt_PackFontRanges(). If you more control over the packing of multiple +// fonts, or if you want to pack custom data into a font texture, take a look +// at the source to of stbtt_PackFontRanges() and create a custom version +// using these functions, e.g. call GatherRects multiple times, +// building up a single array of rects, then call PackRects once, +// then call RenderIntoRects repeatedly. This may result in a +// better packing than calling PackFontRanges multiple times +// (or it may not). + +// this is an opaque structure that you shouldn't mess with which holds +// all the context needed from PackBegin to PackEnd. +struct stbtt_pack_context { + void *user_allocator_context; + void *pack_info; + int width; + int height; + int stride_in_bytes; + int padding; + int skip_missing; + unsigned int h_oversample, v_oversample; + unsigned char *pixels; + void *nodes; +}; + +////////////////////////////////////////////////////////////////////////////// +// +// FONT LOADING +// +// + +STBTT_DEF int stbtt_GetNumberOfFonts(const unsigned char *data); +// This function will determine the number of fonts in a font file. TrueType +// collection (.ttc) files may contain multiple fonts, while TrueType font +// (.ttf) files only contain one font. The number of fonts can be used for +// indexing with the previous function where the index is between zero and one +// less than the total fonts. 
If an error occurs, -1 is returned. + +STBTT_DEF int stbtt_GetFontOffsetForIndex(const unsigned char *data, int index); +// Each .ttf/.ttc file may have more than one font. Each font has a sequential +// index number starting from 0. Call this function to get the font offset for +// a given index; it returns -1 if the index is out of range. A regular .ttf +// file will only define one font and it always be at offset 0, so it will +// return '0' for index 0, and -1 for all other indices. + +// The following structure is defined publicly so you can declare one on +// the stack or as a global or etc, but you should treat it as opaque. +struct stbtt_fontinfo +{ + void * userdata; + unsigned char * data; // pointer to .ttf file + int fontstart; // offset of start of font + + int numGlyphs; // number of glyphs, needed for range checking + + int loca,head,glyf,hhea,hmtx,kern,gpos,svg; // table locations as offset from start of .ttf + int index_map; // a cmap mapping for our chosen character encoding + int indexToLocFormat; // format needed to map from glyph index to glyph + + stbtt__buf cff; // cff font data + stbtt__buf charstrings; // the charstring index + stbtt__buf gsubrs; // global charstring subroutines index + stbtt__buf subrs; // private charstring subroutines index + stbtt__buf fontdicts; // array of font dicts + stbtt__buf fdselect; // map from glyph to fontdict +}; + +STBTT_DEF int stbtt_InitFont(stbtt_fontinfo *info, const unsigned char *data, int offset); +// Given an offset into the file that defines a font, this function builds +// the necessary cached info for the rest of the system. You must allocate +// the stbtt_fontinfo yourself, and stbtt_InitFont will fill it out. You don't +// need to do anything special to free it, because the contents are pure +// value data with no additional data structures. Returns 0 on failure. + + +////////////////////////////////////////////////////////////////////////////// +// +// CHARACTER TO GLYPH-INDEX CONVERSIOn + +STBTT_DEF int stbtt_FindGlyphIndex(const stbtt_fontinfo *info, int unicode_codepoint); +// If you're going to perform multiple operations on the same character +// and you want a speed-up, call this function with the character you're +// going to process, then use glyph-based functions instead of the +// codepoint-based functions. +// Returns 0 if the character codepoint is not defined in the font. + + +////////////////////////////////////////////////////////////////////////////// +// +// CHARACTER PROPERTIES +// + +STBTT_DEF float stbtt_ScaleForPixelHeight(const stbtt_fontinfo *info, float pixels); +// computes a scale factor to produce a font whose "height" is 'pixels' tall. +// Height is measured as the distance from the highest ascender to the lowest +// descender; in other words, it's equivalent to calling stbtt_GetFontVMetrics +// and computing: +// scale = pixels / (ascent - descent) +// so if you prefer to measure height by the ascent only, use a similar calculation. + +STBTT_DEF float stbtt_ScaleForMappingEmToPixels(const stbtt_fontinfo *info, float pixels); +// computes a scale factor to produce a font whose EM size is mapped to +// 'pixels' tall. This is probably what traditional APIs compute, but +// I'm not positive. + +STBTT_DEF void stbtt_GetFontVMetrics(const stbtt_fontinfo *info, int *ascent, int *descent, int *lineGap); +// ascent is the coordinate above the baseline the font extends; descent +// is the coordinate below the baseline the font extends (i.e. 
it is typically negative) +// lineGap is the spacing between one row's descent and the next row's ascent... +// so you should advance the vertical position by "*ascent - *descent + *lineGap" +// these are expressed in unscaled coordinates, so you must multiply by +// the scale factor for a given size + +STBTT_DEF int stbtt_GetFontVMetricsOS2(const stbtt_fontinfo *info, int *typoAscent, int *typoDescent, int *typoLineGap); +// analogous to GetFontVMetrics, but returns the "typographic" values from the OS/2 +// table (specific to MS/Windows TTF files). +// +// Returns 1 on success (table present), 0 on failure. + +STBTT_DEF void stbtt_GetFontBoundingBox(const stbtt_fontinfo *info, int *x0, int *y0, int *x1, int *y1); +// the bounding box around all possible characters + +STBTT_DEF void stbtt_GetCodepointHMetrics(const stbtt_fontinfo *info, int codepoint, int *advanceWidth, int *leftSideBearing); +// leftSideBearing is the offset from the current horizontal position to the left edge of the character +// advanceWidth is the offset from the current horizontal position to the next horizontal position +// these are expressed in unscaled coordinates + +STBTT_DEF int stbtt_GetCodepointKernAdvance(const stbtt_fontinfo *info, int ch1, int ch2); +// an additional amount to add to the 'advance' value between ch1 and ch2 + +STBTT_DEF int stbtt_GetCodepointBox(const stbtt_fontinfo *info, int codepoint, int *x0, int *y0, int *x1, int *y1); +// Gets the bounding box of the visible part of the glyph, in unscaled coordinates + +STBTT_DEF void stbtt_GetGlyphHMetrics(const stbtt_fontinfo *info, int glyph_index, int *advanceWidth, int *leftSideBearing); +STBTT_DEF int stbtt_GetGlyphKernAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2); +STBTT_DEF int stbtt_GetGlyphBox(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1); +// as above, but takes one or more glyph indices for greater efficiency + +typedef struct stbtt_kerningentry +{ + int glyph1; // use stbtt_FindGlyphIndex + int glyph2; + int advance; +} stbtt_kerningentry; + +STBTT_DEF int stbtt_GetKerningTableLength(const stbtt_fontinfo *info); +STBTT_DEF int stbtt_GetKerningTable(const stbtt_fontinfo *info, stbtt_kerningentry* table, int table_length); +// Retrieves a complete list of all of the kerning pairs provided by the font +// stbtt_GetKerningTable never writes more than table_length entries and returns how many entries it did write. +// The table will be sorted by (a.glyph1 == b.glyph1)?(a.glyph2 < b.glyph2):(a.glyph1 < b.glyph1) + +////////////////////////////////////////////////////////////////////////////// +// +// GLYPH SHAPES (you probably don't need these, but they have to go before +// the bitmaps for C declaration-order reasons) +// + +#ifndef STBTT_vmove // you can predefine these to use different values (but why?) 
+ enum { + STBTT_vmove=1, + STBTT_vline, + STBTT_vcurve, + STBTT_vcubic + }; +#endif + +#ifndef stbtt_vertex // you can predefine this to use different values + // (we share this with other code at RAD) + #define stbtt_vertex_type short // can't use stbtt_int16 because that's not visible in the header file + typedef struct + { + stbtt_vertex_type x,y,cx,cy,cx1,cy1; + unsigned char type,padding; + } stbtt_vertex; +#endif + +STBTT_DEF int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index); +// returns non-zero if nothing is drawn for this glyph + +STBTT_DEF int stbtt_GetCodepointShape(const stbtt_fontinfo *info, int unicode_codepoint, stbtt_vertex **vertices); +STBTT_DEF int stbtt_GetGlyphShape(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **vertices); +// returns # of vertices and fills *vertices with the pointer to them +// these are expressed in "unscaled" coordinates +// +// The shape is a series of contours. Each one starts with +// a STBTT_moveto, then consists of a series of mixed +// STBTT_lineto and STBTT_curveto segments. A lineto +// draws a line from previous endpoint to its x,y; a curveto +// draws a quadratic bezier from previous endpoint to +// its x,y, using cx,cy as the bezier control point. + +STBTT_DEF void stbtt_FreeShape(const stbtt_fontinfo *info, stbtt_vertex *vertices); +// frees the data allocated above + +STBTT_DEF int stbtt_GetCodepointSVG(const stbtt_fontinfo *info, int unicode_codepoint, const char **svg); +STBTT_DEF int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char **svg); +// fills svg with the character's SVG data. +// returns data size or 0 if SVG not found. + +////////////////////////////////////////////////////////////////////////////// +// +// BITMAP RENDERING +// + +STBTT_DEF void stbtt_FreeBitmap(unsigned char *bitmap, void *userdata); +// frees the bitmap allocated below + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int codepoint, int *width, int *height, int *xoff, int *yoff); +// allocates a large-enough single-channel 8bpp bitmap and renders the +// specified character/glyph at the specified scale into it, with +// antialiasing. 0 is no coverage (transparent), 255 is fully covered (opaque). +// *width & *height are filled out with the width & height of the bitmap, +// which is stored left-to-right, top-to-bottom. +// +// xoff/yoff are the offset it pixel space from the glyph origin to the top-left of the bitmap + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint, int *width, int *height, int *xoff, int *yoff); +// the same as stbtt_GetCodepoitnBitmap, but you can specify a subpixel +// shift for the character + +STBTT_DEF void stbtt_MakeCodepointBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int codepoint); +// the same as stbtt_GetCodepointBitmap, but you pass in storage for the bitmap +// in the form of 'output', with row spacing of 'out_stride' bytes. the bitmap +// is clipped to out_w/out_h bytes. Call stbtt_GetCodepointBitmapBox to get the +// width and height and positioning info for it first. 
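+
+// A minimal sketch of the caller-allocated path described above, assuming an
+// already-initialized 'font' (see stbtt_InitFont) and a hypothetical helper name:
+// size the buffer from stbtt_GetCodepointBitmapBox, then render into it with
+// stbtt_MakeCodepointBitmap.
+#if 0
+#include <stdlib.h>
+
+static unsigned char *render_codepoint(const stbtt_fontinfo *font, int codepoint,
+                                       float pixel_height, int *w, int *h)
+{
+   int x0, y0, x1, y1;
+   float scale = stbtt_ScaleForPixelHeight(font, pixel_height);
+   unsigned char *pixels;
+   stbtt_GetCodepointBitmapBox(font, codepoint, scale, scale, &x0, &y0, &x1, &y1);
+   *w = x1 - x0;
+   *h = y1 - y0;
+   pixels = (unsigned char *) malloc((size_t) (*w) * (size_t) (*h));
+   if (pixels && *w > 0 && *h > 0)
+      stbtt_MakeCodepointBitmap(font, pixels, *w, *h, *w, scale, scale, codepoint);
+   return pixels; // caller frees
+}
+#endif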
+ +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint); +// same as stbtt_MakeCodepointBitmap, but you can specify a subpixel +// shift for the character + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int codepoint); +// same as stbtt_MakeCodepointBitmapSubpixel, but prefiltering +// is performed (see stbtt_PackSetOversampling) + +STBTT_DEF void stbtt_GetCodepointBitmapBox(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1); +// get the bbox of the bitmap centered around the glyph origin; so the +// bitmap width is ix1-ix0, height is iy1-iy0, and location to place +// the bitmap top left is (leftSideBearing*scale,iy0). +// (Note that the bitmap uses y-increases-down, but the shape uses +// y-increases-up, so CodepointBitmapBox and CodepointBox are inverted.) + +STBTT_DEF void stbtt_GetCodepointBitmapBoxSubpixel(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1); +// same as stbtt_GetCodepointBitmapBox, but you can specify a subpixel +// shift for the character + +// the following functions are equivalent to the above functions, but operate +// on glyph indices instead of Unicode codepoints (for efficiency) +STBTT_DEF unsigned char *stbtt_GetGlyphBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int glyph, int *width, int *height, int *xoff, int *yoff); +STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int glyph, int *width, int *height, int *xoff, int *yoff); +STBTT_DEF void stbtt_MakeGlyphBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int glyph); +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int glyph); +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int glyph); +STBTT_DEF void stbtt_GetGlyphBitmapBox(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1); +STBTT_DEF void stbtt_GetGlyphBitmapBoxSubpixel(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y,float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1); + + +// @TODO: don't expose this structure +typedef struct +{ + int w,h,stride; + unsigned char *pixels; +} stbtt__bitmap; + +// rasterize a shape with quadratic beziers into a bitmap +STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result, // 1-channel bitmap to draw into + float flatness_in_pixels, // allowable error of curve in pixels + stbtt_vertex *vertices, // array of vertices defining shape + int num_verts, // number of vertices in above array + float scale_x, float scale_y, // scale applied to input vertices + float 
shift_x, float shift_y, // translation applied to input vertices + int x_off, int y_off, // another translation applied to input + int invert, // if non-zero, vertically flip shape + void *userdata); // context for to STBTT_MALLOC + +////////////////////////////////////////////////////////////////////////////// +// +// Signed Distance Function (or Field) rendering + +STBTT_DEF void stbtt_FreeSDF(unsigned char *bitmap, void *userdata); +// frees the SDF bitmap allocated below + +STBTT_DEF unsigned char * stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float scale, int glyph, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff); +STBTT_DEF unsigned char * stbtt_GetCodepointSDF(const stbtt_fontinfo *info, float scale, int codepoint, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff); +// These functions compute a discretized SDF field for a single character, suitable for storing +// in a single-channel texture, sampling with bilinear filtering, and testing against +// larger than some threshold to produce scalable fonts. +// info -- the font +// scale -- controls the size of the resulting SDF bitmap, same as it would be creating a regular bitmap +// glyph/codepoint -- the character to generate the SDF for +// padding -- extra "pixels" around the character which are filled with the distance to the character (not 0), +// which allows effects like bit outlines +// onedge_value -- value 0-255 to test the SDF against to reconstruct the character (i.e. the isocontour of the character) +// pixel_dist_scale -- what value the SDF should increase by when moving one SDF "pixel" away from the edge (on the 0..255 scale) +// if positive, > onedge_value is inside; if negative, < onedge_value is inside +// width,height -- output height & width of the SDF bitmap (including padding) +// xoff,yoff -- output origin of the character +// return value -- a 2D array of bytes 0..255, width*height in size +// +// pixel_dist_scale & onedge_value are a scale & bias that allows you to make +// optimal use of the limited 0..255 for your application, trading off precision +// and special effects. SDF values outside the range 0..255 are clamped to 0..255. +// +// Example: +// scale = stbtt_ScaleForPixelHeight(22) +// padding = 5 +// onedge_value = 180 +// pixel_dist_scale = 180/5.0 = 36.0 +// +// This will create an SDF bitmap in which the character is about 22 pixels +// high but the whole bitmap is about 22+5+5=32 pixels high. To produce a filled +// shape, sample the SDF at each pixel and fill the pixel if the SDF value +// is greater than or equal to 180/255. (You'll actually want to antialias, +// which is beyond the scope of this example.) Additionally, you can compute +// offset outlines (e.g. to stroke the character border inside & outside, +// or only outside). For example, to fill outside the character up to 3 SDF +// pixels, you would compare against (180-36.0*3)/255 = 72/255. The above +// choice of variables maps a range from 5 pixels outside the shape to +// 2 pixels inside the shape to 0..255; this is intended primarily for apply +// outside effects only (the interior range is needed to allow proper +// antialiasing of the font at *smaller* sizes) +// +// The function computes the SDF analytically at each SDF pixel, not by e.g. +// building a higher-res bitmap and approximating it. 
In theory the quality +// should be as high as possible for an SDF of this size & representation, but +// unclear if this is true in practice (perhaps building a higher-res bitmap +// and computing from that can allow drop-out prevention). +// +// The algorithm has not been optimized at all, so expect it to be slow +// if computing lots of characters or very large sizes. + + + +////////////////////////////////////////////////////////////////////////////// +// +// Finding the right font... +// +// You should really just solve this offline, keep your own tables +// of what font is what, and don't try to get it out of the .ttf file. +// That's because getting it out of the .ttf file is really hard, because +// the names in the file can appear in many possible encodings, in many +// possible languages, and e.g. if you need a case-insensitive comparison, +// the details of that depend on the encoding & language in a complex way +// (actually underspecified in truetype, but also gigantic). +// +// But you can use the provided functions in two possible ways: +// stbtt_FindMatchingFont() will use *case-sensitive* comparisons on +// unicode-encoded names to try to find the font you want; +// you can run this before calling stbtt_InitFont() +// +// stbtt_GetFontNameString() lets you get any of the various strings +// from the file yourself and do your own comparisons on them. +// You have to have called stbtt_InitFont() first. + + +STBTT_DEF int stbtt_FindMatchingFont(const unsigned char *fontdata, const char *name, int flags); +// returns the offset (not index) of the font that matches, or -1 if none +// if you use STBTT_MACSTYLE_DONTCARE, use a font name like "Arial Bold". +// if you use any other flag, use a font name like "Arial"; this checks +// the 'macStyle' header field; i don't know if fonts set this consistently +#define STBTT_MACSTYLE_DONTCARE 0 +#define STBTT_MACSTYLE_BOLD 1 +#define STBTT_MACSTYLE_ITALIC 2 +#define STBTT_MACSTYLE_UNDERSCORE 4 +#define STBTT_MACSTYLE_NONE 8 // <= not same as 0, this makes us check the bitfield is 0 + +STBTT_DEF int stbtt_CompareUTF8toUTF16_bigendian(const char *s1, int len1, const char *s2, int len2); +// returns 1/0 whether the first string interpreted as utf8 is identical to +// the second string interpreted as big-endian utf16... useful for strings from next func + +STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *length, int platformID, int encodingID, int languageID, int nameID); +// returns the string (which may be big-endian double byte, e.g. for unicode) +// and puts the length in bytes in *length. 
+// +// some of the values for the IDs are below; for more see the truetype spec: +// http://developer.apple.com/textfonts/TTRefMan/RM06/Chap6name.html +// http://www.microsoft.com/typography/otspec/name.htm + +enum { // platformID + STBTT_PLATFORM_ID_UNICODE =0, + STBTT_PLATFORM_ID_MAC =1, + STBTT_PLATFORM_ID_ISO =2, + STBTT_PLATFORM_ID_MICROSOFT =3 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_UNICODE + STBTT_UNICODE_EID_UNICODE_1_0 =0, + STBTT_UNICODE_EID_UNICODE_1_1 =1, + STBTT_UNICODE_EID_ISO_10646 =2, + STBTT_UNICODE_EID_UNICODE_2_0_BMP=3, + STBTT_UNICODE_EID_UNICODE_2_0_FULL=4 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_MICROSOFT + STBTT_MS_EID_SYMBOL =0, + STBTT_MS_EID_UNICODE_BMP =1, + STBTT_MS_EID_SHIFTJIS =2, + STBTT_MS_EID_UNICODE_FULL =10 +}; + +enum { // encodingID for STBTT_PLATFORM_ID_MAC; same as Script Manager codes + STBTT_MAC_EID_ROMAN =0, STBTT_MAC_EID_ARABIC =4, + STBTT_MAC_EID_JAPANESE =1, STBTT_MAC_EID_HEBREW =5, + STBTT_MAC_EID_CHINESE_TRAD =2, STBTT_MAC_EID_GREEK =6, + STBTT_MAC_EID_KOREAN =3, STBTT_MAC_EID_RUSSIAN =7 +}; + +enum { // languageID for STBTT_PLATFORM_ID_MICROSOFT; same as LCID... + // problematic because there are e.g. 16 english LCIDs and 16 arabic LCIDs + STBTT_MS_LANG_ENGLISH =0x0409, STBTT_MS_LANG_ITALIAN =0x0410, + STBTT_MS_LANG_CHINESE =0x0804, STBTT_MS_LANG_JAPANESE =0x0411, + STBTT_MS_LANG_DUTCH =0x0413, STBTT_MS_LANG_KOREAN =0x0412, + STBTT_MS_LANG_FRENCH =0x040c, STBTT_MS_LANG_RUSSIAN =0x0419, + STBTT_MS_LANG_GERMAN =0x0407, STBTT_MS_LANG_SPANISH =0x0409, + STBTT_MS_LANG_HEBREW =0x040d, STBTT_MS_LANG_SWEDISH =0x041D +}; + +enum { // languageID for STBTT_PLATFORM_ID_MAC + STBTT_MAC_LANG_ENGLISH =0 , STBTT_MAC_LANG_JAPANESE =11, + STBTT_MAC_LANG_ARABIC =12, STBTT_MAC_LANG_KOREAN =23, + STBTT_MAC_LANG_DUTCH =4 , STBTT_MAC_LANG_RUSSIAN =32, + STBTT_MAC_LANG_FRENCH =1 , STBTT_MAC_LANG_SPANISH =6 , + STBTT_MAC_LANG_GERMAN =2 , STBTT_MAC_LANG_SWEDISH =5 , + STBTT_MAC_LANG_HEBREW =10, STBTT_MAC_LANG_CHINESE_SIMPLIFIED =33, + STBTT_MAC_LANG_ITALIAN =3 , STBTT_MAC_LANG_CHINESE_TRAD =19 +}; + +#ifdef __cplusplus +} +#endif + +#endif // __STB_INCLUDE_STB_TRUETYPE_H__ + +/////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// +//// +//// IMPLEMENTATION +//// +//// + +#ifdef STB_TRUETYPE_IMPLEMENTATION + +#ifndef STBTT_MAX_OVERSAMPLE +#define STBTT_MAX_OVERSAMPLE 8 +#endif + +#if STBTT_MAX_OVERSAMPLE > 255 +#error "STBTT_MAX_OVERSAMPLE cannot be > 255" +#endif + +typedef int stbtt__test_oversample_pow2[(STBTT_MAX_OVERSAMPLE & (STBTT_MAX_OVERSAMPLE-1)) == 0 ? 1 : -1]; + +#ifndef STBTT_RASTERIZER_VERSION +#define STBTT_RASTERIZER_VERSION 2 +#endif + +#ifdef _MSC_VER +#define STBTT__NOTUSED(v) (void)(v) +#else +#define STBTT__NOTUSED(v) (void)sizeof(v) +#endif + +////////////////////////////////////////////////////////////////////////// +// +// stbtt__buf helpers to parse data from file +// + +static stbtt_uint8 stbtt__buf_get8(stbtt__buf *b) +{ + if (b->cursor >= b->size) + return 0; + return b->data[b->cursor++]; +} + +static stbtt_uint8 stbtt__buf_peek8(stbtt__buf *b) +{ + if (b->cursor >= b->size) + return 0; + return b->data[b->cursor]; +} + +static void stbtt__buf_seek(stbtt__buf *b, int o) +{ + STBTT_assert(!(o > b->size || o < 0)); + b->cursor = (o > b->size || o < 0) ? 
b->size : o; +} + +static void stbtt__buf_skip(stbtt__buf *b, int o) +{ + stbtt__buf_seek(b, b->cursor + o); +} + +static stbtt_uint32 stbtt__buf_get(stbtt__buf *b, int n) +{ + stbtt_uint32 v = 0; + int i; + STBTT_assert(n >= 1 && n <= 4); + for (i = 0; i < n; i++) + v = (v << 8) | stbtt__buf_get8(b); + return v; +} + +static stbtt__buf stbtt__new_buf(const void *p, size_t size) +{ + stbtt__buf r; + STBTT_assert(size < 0x40000000); + r.data = (stbtt_uint8*) p; + r.size = (int) size; + r.cursor = 0; + return r; +} + +#define stbtt__buf_get16(b) stbtt__buf_get((b), 2) +#define stbtt__buf_get32(b) stbtt__buf_get((b), 4) + +static stbtt__buf stbtt__buf_range(const stbtt__buf *b, int o, int s) +{ + stbtt__buf r = stbtt__new_buf(NULL, 0); + if (o < 0 || s < 0 || o > b->size || s > b->size - o) return r; + r.data = b->data + o; + r.size = s; + return r; +} + +static stbtt__buf stbtt__cff_get_index(stbtt__buf *b) +{ + int count, start, offsize; + start = b->cursor; + count = stbtt__buf_get16(b); + if (count) { + offsize = stbtt__buf_get8(b); + STBTT_assert(offsize >= 1 && offsize <= 4); + stbtt__buf_skip(b, offsize * count); + stbtt__buf_skip(b, stbtt__buf_get(b, offsize) - 1); + } + return stbtt__buf_range(b, start, b->cursor - start); +} + +static stbtt_uint32 stbtt__cff_int(stbtt__buf *b) +{ + int b0 = stbtt__buf_get8(b); + if (b0 >= 32 && b0 <= 246) return b0 - 139; + else if (b0 >= 247 && b0 <= 250) return (b0 - 247)*256 + stbtt__buf_get8(b) + 108; + else if (b0 >= 251 && b0 <= 254) return -(b0 - 251)*256 - stbtt__buf_get8(b) - 108; + else if (b0 == 28) return stbtt__buf_get16(b); + else if (b0 == 29) return stbtt__buf_get32(b); + STBTT_assert(0); + return 0; +} + +static void stbtt__cff_skip_operand(stbtt__buf *b) { + int v, b0 = stbtt__buf_peek8(b); + STBTT_assert(b0 >= 28); + if (b0 == 30) { + stbtt__buf_skip(b, 1); + while (b->cursor < b->size) { + v = stbtt__buf_get8(b); + if ((v & 0xF) == 0xF || (v >> 4) == 0xF) + break; + } + } else { + stbtt__cff_int(b); + } +} + +static stbtt__buf stbtt__dict_get(stbtt__buf *b, int key) +{ + stbtt__buf_seek(b, 0); + while (b->cursor < b->size) { + int start = b->cursor, end, op; + while (stbtt__buf_peek8(b) >= 28) + stbtt__cff_skip_operand(b); + end = b->cursor; + op = stbtt__buf_get8(b); + if (op == 12) op = stbtt__buf_get8(b) | 0x100; + if (op == key) return stbtt__buf_range(b, start, end-start); + } + return stbtt__buf_range(b, 0, 0); +} + +static void stbtt__dict_get_ints(stbtt__buf *b, int key, int outcount, stbtt_uint32 *out) +{ + int i; + stbtt__buf operands = stbtt__dict_get(b, key); + for (i = 0; i < outcount && operands.cursor < operands.size; i++) + out[i] = stbtt__cff_int(&operands); +} + +static int stbtt__cff_index_count(stbtt__buf *b) +{ + stbtt__buf_seek(b, 0); + return stbtt__buf_get16(b); +} + +static stbtt__buf stbtt__cff_index_get(stbtt__buf b, int i) +{ + int count, offsize, start, end; + stbtt__buf_seek(&b, 0); + count = stbtt__buf_get16(&b); + offsize = stbtt__buf_get8(&b); + STBTT_assert(i >= 0 && i < count); + STBTT_assert(offsize >= 1 && offsize <= 4); + stbtt__buf_skip(&b, i*offsize); + start = stbtt__buf_get(&b, offsize); + end = stbtt__buf_get(&b, offsize); + return stbtt__buf_range(&b, 2+(count+1)*offsize+start, end - start); +} + +////////////////////////////////////////////////////////////////////////// +// +// accessors to parse data from file +// + +// on platforms that don't allow misaligned reads, if we want to allow +// truetype fonts that aren't padded to alignment, define ALLOW_UNALIGNED_TRUETYPE + +#define 
ttBYTE(p) (* (stbtt_uint8 *) (p)) +#define ttCHAR(p) (* (stbtt_int8 *) (p)) +#define ttFixed(p) ttLONG(p) + +static stbtt_uint16 ttUSHORT(stbtt_uint8 *p) { return p[0]*256 + p[1]; } +static stbtt_int16 ttSHORT(stbtt_uint8 *p) { return p[0]*256 + p[1]; } +static stbtt_uint32 ttULONG(stbtt_uint8 *p) { return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]; } +static stbtt_int32 ttLONG(stbtt_uint8 *p) { return (p[0]<<24) + (p[1]<<16) + (p[2]<<8) + p[3]; } + +#define stbtt_tag4(p,c0,c1,c2,c3) ((p)[0] == (c0) && (p)[1] == (c1) && (p)[2] == (c2) && (p)[3] == (c3)) +#define stbtt_tag(p,str) stbtt_tag4(p,str[0],str[1],str[2],str[3]) + +static int stbtt__isfont(stbtt_uint8 *font) +{ + // check the version number + if (stbtt_tag4(font, '1',0,0,0)) return 1; // TrueType 1 + if (stbtt_tag(font, "typ1")) return 1; // TrueType with type 1 font -- we don't support this! + if (stbtt_tag(font, "OTTO")) return 1; // OpenType with CFF + if (stbtt_tag4(font, 0,1,0,0)) return 1; // OpenType 1.0 + if (stbtt_tag(font, "true")) return 1; // Apple specification for TrueType fonts + return 0; +} + +// @OPTIMIZE: binary search +static stbtt_uint32 stbtt__find_table(stbtt_uint8 *data, stbtt_uint32 fontstart, const char *tag) +{ + stbtt_int32 num_tables = ttUSHORT(data+fontstart+4); + stbtt_uint32 tabledir = fontstart + 12; + stbtt_int32 i; + for (i=0; i < num_tables; ++i) { + stbtt_uint32 loc = tabledir + 16*i; + if (stbtt_tag(data+loc+0, tag)) + return ttULONG(data+loc+8); + } + return 0; +} + +static int stbtt_GetFontOffsetForIndex_internal(unsigned char *font_collection, int index) +{ + // if it's just a font, there's only one valid index + if (stbtt__isfont(font_collection)) + return index == 0 ? 0 : -1; + + // check if it's a TTC + if (stbtt_tag(font_collection, "ttcf")) { + // version 1? + if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { + stbtt_int32 n = ttLONG(font_collection+8); + if (index >= n) + return -1; + return ttULONG(font_collection+12+index*4); + } + } + return -1; +} + +static int stbtt_GetNumberOfFonts_internal(unsigned char *font_collection) +{ + // if it's just a font, there's only one valid font + if (stbtt__isfont(font_collection)) + return 1; + + // check if it's a TTC + if (stbtt_tag(font_collection, "ttcf")) { + // version 1? 
+ if (ttULONG(font_collection+4) == 0x00010000 || ttULONG(font_collection+4) == 0x00020000) { + return ttLONG(font_collection+8); + } + } + return 0; +} + +static stbtt__buf stbtt__get_subrs(stbtt__buf cff, stbtt__buf fontdict) +{ + stbtt_uint32 subrsoff = 0, private_loc[2] = { 0, 0 }; + stbtt__buf pdict; + stbtt__dict_get_ints(&fontdict, 18, 2, private_loc); + if (!private_loc[1] || !private_loc[0]) return stbtt__new_buf(NULL, 0); + pdict = stbtt__buf_range(&cff, private_loc[1], private_loc[0]); + stbtt__dict_get_ints(&pdict, 19, 1, &subrsoff); + if (!subrsoff) return stbtt__new_buf(NULL, 0); + stbtt__buf_seek(&cff, private_loc[1]+subrsoff); + return stbtt__cff_get_index(&cff); +} + +// since most people won't use this, find this table the first time it's needed +static int stbtt__get_svg(stbtt_fontinfo *info) +{ + stbtt_uint32 t; + if (info->svg < 0) { + t = stbtt__find_table(info->data, info->fontstart, "SVG "); + if (t) { + stbtt_uint32 offset = ttULONG(info->data + t + 2); + info->svg = t + offset; + } else { + info->svg = 0; + } + } + return info->svg; +} + +static int stbtt_InitFont_internal(stbtt_fontinfo *info, unsigned char *data, int fontstart) +{ + stbtt_uint32 cmap, t; + stbtt_int32 i,numTables; + + info->data = data; + info->fontstart = fontstart; + info->cff = stbtt__new_buf(NULL, 0); + + cmap = stbtt__find_table(data, fontstart, "cmap"); // required + info->loca = stbtt__find_table(data, fontstart, "loca"); // required + info->head = stbtt__find_table(data, fontstart, "head"); // required + info->glyf = stbtt__find_table(data, fontstart, "glyf"); // required + info->hhea = stbtt__find_table(data, fontstart, "hhea"); // required + info->hmtx = stbtt__find_table(data, fontstart, "hmtx"); // required + info->kern = stbtt__find_table(data, fontstart, "kern"); // not required + info->gpos = stbtt__find_table(data, fontstart, "GPOS"); // not required + + if (!cmap || !info->head || !info->hhea || !info->hmtx) + return 0; + if (info->glyf) { + // required for truetype + if (!info->loca) return 0; + } else { + // initialization for CFF / Type2 fonts (OTF) + stbtt__buf b, topdict, topdictidx; + stbtt_uint32 cstype = 2, charstrings = 0, fdarrayoff = 0, fdselectoff = 0; + stbtt_uint32 cff; + + cff = stbtt__find_table(data, fontstart, "CFF "); + if (!cff) return 0; + + info->fontdicts = stbtt__new_buf(NULL, 0); + info->fdselect = stbtt__new_buf(NULL, 0); + + // @TODO this should use size from table (not 512MB) + info->cff = stbtt__new_buf(data+cff, 512*1024*1024); + b = info->cff; + + // read the header + stbtt__buf_skip(&b, 2); + stbtt__buf_seek(&b, stbtt__buf_get8(&b)); // hdrsize + + // @TODO the name INDEX could list multiple fonts, + // but we just use the first one. 
+ stbtt__cff_get_index(&b); // name INDEX + topdictidx = stbtt__cff_get_index(&b); + topdict = stbtt__cff_index_get(topdictidx, 0); + stbtt__cff_get_index(&b); // string INDEX + info->gsubrs = stbtt__cff_get_index(&b); + + stbtt__dict_get_ints(&topdict, 17, 1, &charstrings); + stbtt__dict_get_ints(&topdict, 0x100 | 6, 1, &cstype); + stbtt__dict_get_ints(&topdict, 0x100 | 36, 1, &fdarrayoff); + stbtt__dict_get_ints(&topdict, 0x100 | 37, 1, &fdselectoff); + info->subrs = stbtt__get_subrs(b, topdict); + + // we only support Type 2 charstrings + if (cstype != 2) return 0; + if (charstrings == 0) return 0; + + if (fdarrayoff) { + // looks like a CID font + if (!fdselectoff) return 0; + stbtt__buf_seek(&b, fdarrayoff); + info->fontdicts = stbtt__cff_get_index(&b); + info->fdselect = stbtt__buf_range(&b, fdselectoff, b.size-fdselectoff); + } + + stbtt__buf_seek(&b, charstrings); + info->charstrings = stbtt__cff_get_index(&b); + } + + t = stbtt__find_table(data, fontstart, "maxp"); + if (t) + info->numGlyphs = ttUSHORT(data+t+4); + else + info->numGlyphs = 0xffff; + + info->svg = -1; + + // find a cmap encoding table we understand *now* to avoid searching + // later. (todo: could make this installable) + // the same regardless of glyph. + numTables = ttUSHORT(data + cmap + 2); + info->index_map = 0; + for (i=0; i < numTables; ++i) { + stbtt_uint32 encoding_record = cmap + 4 + 8 * i; + // find an encoding we understand: + switch(ttUSHORT(data+encoding_record)) { + case STBTT_PLATFORM_ID_MICROSOFT: + switch (ttUSHORT(data+encoding_record+2)) { + case STBTT_MS_EID_UNICODE_BMP: + case STBTT_MS_EID_UNICODE_FULL: + // MS/Unicode + info->index_map = cmap + ttULONG(data+encoding_record+4); + break; + } + break; + case STBTT_PLATFORM_ID_UNICODE: + // Mac/iOS has these + // all the encodingIDs are unicode, so we don't bother to check it + info->index_map = cmap + ttULONG(data+encoding_record+4); + break; + } + } + if (info->index_map == 0) + return 0; + + info->indexToLocFormat = ttUSHORT(data+info->head + 50); + return 1; +} + +STBTT_DEF int stbtt_FindGlyphIndex(const stbtt_fontinfo *info, int unicode_codepoint) +{ + stbtt_uint8 *data = info->data; + stbtt_uint32 index_map = info->index_map; + + stbtt_uint16 format = ttUSHORT(data + index_map + 0); + if (format == 0) { // apple byte encoding + stbtt_int32 bytes = ttUSHORT(data + index_map + 2); + if (unicode_codepoint < bytes-6) + return ttBYTE(data + index_map + 6 + unicode_codepoint); + return 0; + } else if (format == 6) { + stbtt_uint32 first = ttUSHORT(data + index_map + 6); + stbtt_uint32 count = ttUSHORT(data + index_map + 8); + if ((stbtt_uint32) unicode_codepoint >= first && (stbtt_uint32) unicode_codepoint < first+count) + return ttUSHORT(data + index_map + 10 + (unicode_codepoint - first)*2); + return 0; + } else if (format == 2) { + STBTT_assert(0); // @TODO: high-byte mapping for japanese/chinese/korean + return 0; + } else if (format == 4) { // standard mapping for windows fonts: binary search collection of ranges + stbtt_uint16 segcount = ttUSHORT(data+index_map+6) >> 1; + stbtt_uint16 searchRange = ttUSHORT(data+index_map+8) >> 1; + stbtt_uint16 entrySelector = ttUSHORT(data+index_map+10); + stbtt_uint16 rangeShift = ttUSHORT(data+index_map+12) >> 1; + + // do a binary search of the segments + stbtt_uint32 endCount = index_map + 14; + stbtt_uint32 search = endCount; + + if (unicode_codepoint > 0xffff) + return 0; + + // they lie from endCount .. endCount + segCount + // but searchRange is the nearest power of two, so... 
+ if (unicode_codepoint >= ttUSHORT(data + search + rangeShift*2)) + search += rangeShift*2; + + // now decrement to bias correctly to find smallest + search -= 2; + while (entrySelector) { + stbtt_uint16 end; + searchRange >>= 1; + end = ttUSHORT(data + search + searchRange*2); + if (unicode_codepoint > end) + search += searchRange*2; + --entrySelector; + } + search += 2; + + { + stbtt_uint16 offset, start; + stbtt_uint16 item = (stbtt_uint16) ((search - endCount) >> 1); + + STBTT_assert(unicode_codepoint <= ttUSHORT(data + endCount + 2*item)); + start = ttUSHORT(data + index_map + 14 + segcount*2 + 2 + 2*item); + if (unicode_codepoint < start) + return 0; + + offset = ttUSHORT(data + index_map + 14 + segcount*6 + 2 + 2*item); + if (offset == 0) + return (stbtt_uint16) (unicode_codepoint + ttSHORT(data + index_map + 14 + segcount*4 + 2 + 2*item)); + + return ttUSHORT(data + offset + (unicode_codepoint-start)*2 + index_map + 14 + segcount*6 + 2 + 2*item); + } + } else if (format == 12 || format == 13) { + stbtt_uint32 ngroups = ttULONG(data+index_map+12); + stbtt_int32 low,high; + low = 0; high = (stbtt_int32)ngroups; + // Binary search the right group. + while (low < high) { + stbtt_int32 mid = low + ((high-low) >> 1); // rounds down, so low <= mid < high + stbtt_uint32 start_char = ttULONG(data+index_map+16+mid*12); + stbtt_uint32 end_char = ttULONG(data+index_map+16+mid*12+4); + if ((stbtt_uint32) unicode_codepoint < start_char) + high = mid; + else if ((stbtt_uint32) unicode_codepoint > end_char) + low = mid+1; + else { + stbtt_uint32 start_glyph = ttULONG(data+index_map+16+mid*12+8); + if (format == 12) + return start_glyph + unicode_codepoint-start_char; + else // format == 13 + return start_glyph; + } + } + return 0; // not found + } + // @TODO + STBTT_assert(0); + return 0; +} + +STBTT_DEF int stbtt_GetCodepointShape(const stbtt_fontinfo *info, int unicode_codepoint, stbtt_vertex **vertices) +{ + return stbtt_GetGlyphShape(info, stbtt_FindGlyphIndex(info, unicode_codepoint), vertices); +} + +static void stbtt_setvertex(stbtt_vertex *v, stbtt_uint8 type, stbtt_int32 x, stbtt_int32 y, stbtt_int32 cx, stbtt_int32 cy) +{ + v->type = type; + v->x = (stbtt_int16) x; + v->y = (stbtt_int16) y; + v->cx = (stbtt_int16) cx; + v->cy = (stbtt_int16) cy; +} + +static int stbtt__GetGlyfOffset(const stbtt_fontinfo *info, int glyph_index) +{ + int g1,g2; + + STBTT_assert(!info->cff.size); + + if (glyph_index >= info->numGlyphs) return -1; // glyph index out of range + if (info->indexToLocFormat >= 2) return -1; // unknown index->glyph map format + + if (info->indexToLocFormat == 0) { + g1 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2) * 2; + g2 = info->glyf + ttUSHORT(info->data + info->loca + glyph_index * 2 + 2) * 2; + } else { + g1 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4); + g2 = info->glyf + ttULONG (info->data + info->loca + glyph_index * 4 + 4); + } + + return g1==g2 ? 
-1 : g1; // if length is 0, return -1 +} + +static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1); + +STBTT_DEF int stbtt_GetGlyphBox(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) +{ + if (info->cff.size) { + stbtt__GetGlyphInfoT2(info, glyph_index, x0, y0, x1, y1); + } else { + int g = stbtt__GetGlyfOffset(info, glyph_index); + if (g < 0) return 0; + + if (x0) *x0 = ttSHORT(info->data + g + 2); + if (y0) *y0 = ttSHORT(info->data + g + 4); + if (x1) *x1 = ttSHORT(info->data + g + 6); + if (y1) *y1 = ttSHORT(info->data + g + 8); + } + return 1; +} + +STBTT_DEF int stbtt_GetCodepointBox(const stbtt_fontinfo *info, int codepoint, int *x0, int *y0, int *x1, int *y1) +{ + return stbtt_GetGlyphBox(info, stbtt_FindGlyphIndex(info,codepoint), x0,y0,x1,y1); +} + +STBTT_DEF int stbtt_IsGlyphEmpty(const stbtt_fontinfo *info, int glyph_index) +{ + stbtt_int16 numberOfContours; + int g; + if (info->cff.size) + return stbtt__GetGlyphInfoT2(info, glyph_index, NULL, NULL, NULL, NULL) == 0; + g = stbtt__GetGlyfOffset(info, glyph_index); + if (g < 0) return 1; + numberOfContours = ttSHORT(info->data + g); + return numberOfContours == 0; +} + +static int stbtt__close_shape(stbtt_vertex *vertices, int num_vertices, int was_off, int start_off, + stbtt_int32 sx, stbtt_int32 sy, stbtt_int32 scx, stbtt_int32 scy, stbtt_int32 cx, stbtt_int32 cy) +{ + if (start_off) { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+scx)>>1, (cy+scy)>>1, cx,cy); + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, sx,sy,scx,scy); + } else { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve,sx,sy,cx,cy); + else + stbtt_setvertex(&vertices[num_vertices++], STBTT_vline,sx,sy,0,0); + } + return num_vertices; +} + +static int stbtt__GetGlyphShapeTT(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + stbtt_int16 numberOfContours; + stbtt_uint8 *endPtsOfContours; + stbtt_uint8 *data = info->data; + stbtt_vertex *vertices=0; + int num_vertices=0; + int g = stbtt__GetGlyfOffset(info, glyph_index); + + *pvertices = NULL; + + if (g < 0) return 0; + + numberOfContours = ttSHORT(data + g); + + if (numberOfContours > 0) { + stbtt_uint8 flags=0,flagcount; + stbtt_int32 ins, i,j=0,m,n, next_move, was_off=0, off, start_off=0; + stbtt_int32 x,y,cx,cy,sx,sy, scx,scy; + stbtt_uint8 *points; + endPtsOfContours = (data + g + 10); + ins = ttUSHORT(data + g + 10 + numberOfContours * 2); + points = data + g + 10 + numberOfContours * 2 + 2 + ins; + + n = 1+ttUSHORT(endPtsOfContours + numberOfContours*2-2); + + m = n + 2*numberOfContours; // a loose bound on how many vertices we might need + vertices = (stbtt_vertex *) STBTT_malloc(m * sizeof(vertices[0]), info->userdata); + if (vertices == 0) + return 0; + + next_move = 0; + flagcount=0; + + // in first pass, we load uninterpreted data into the allocated array + // above, shifted to the end of the array so we won't overwrite it when + // we create our final data starting from the front + + off = m - n; // starting offset for uninterpreted data, regardless of how m ends up being calculated + + // first load flags + + for (i=0; i < n; ++i) { + if (flagcount == 0) { + flags = *points++; + if (flags & 8) + flagcount = *points++; + } else + --flagcount; + vertices[off+i].type = flags; + } + + // now load x coordinates + x=0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + if (flags & 2) { + stbtt_int16 dx = *points++; + x += 
(flags & 16) ? dx : -dx; // ??? + } else { + if (!(flags & 16)) { + x = x + (stbtt_int16) (points[0]*256 + points[1]); + points += 2; + } + } + vertices[off+i].x = (stbtt_int16) x; + } + + // now load y coordinates + y=0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + if (flags & 4) { + stbtt_int16 dy = *points++; + y += (flags & 32) ? dy : -dy; // ??? + } else { + if (!(flags & 32)) { + y = y + (stbtt_int16) (points[0]*256 + points[1]); + points += 2; + } + } + vertices[off+i].y = (stbtt_int16) y; + } + + // now convert them to our format + num_vertices=0; + sx = sy = cx = cy = scx = scy = 0; + for (i=0; i < n; ++i) { + flags = vertices[off+i].type; + x = (stbtt_int16) vertices[off+i].x; + y = (stbtt_int16) vertices[off+i].y; + + if (next_move == i) { + if (i != 0) + num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); + + // now start the new one + start_off = !(flags & 1); + if (start_off) { + // if we start off with an off-curve point, then when we need to find a point on the curve + // where we can start, and we need to save some state for when we wraparound. + scx = x; + scy = y; + if (!(vertices[off+i+1].type & 1)) { + // next point is also a curve point, so interpolate an on-point curve + sx = (x + (stbtt_int32) vertices[off+i+1].x) >> 1; + sy = (y + (stbtt_int32) vertices[off+i+1].y) >> 1; + } else { + // otherwise just use the next point as our start point + sx = (stbtt_int32) vertices[off+i+1].x; + sy = (stbtt_int32) vertices[off+i+1].y; + ++i; // we're using point i+1 as the starting point, so skip it + } + } else { + sx = x; + sy = y; + } + stbtt_setvertex(&vertices[num_vertices++], STBTT_vmove,sx,sy,0,0); + was_off = 0; + next_move = 1 + ttUSHORT(endPtsOfContours+j*2); + ++j; + } else { + if (!(flags & 1)) { // if it's a curve + if (was_off) // two off-curve control points in a row means interpolate an on-curve midpoint + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, (cx+x)>>1, (cy+y)>>1, cx, cy); + cx = x; + cy = y; + was_off = 1; + } else { + if (was_off) + stbtt_setvertex(&vertices[num_vertices++], STBTT_vcurve, x,y, cx, cy); + else + stbtt_setvertex(&vertices[num_vertices++], STBTT_vline, x,y,0,0); + was_off = 0; + } + } + } + num_vertices = stbtt__close_shape(vertices, num_vertices, was_off, start_off, sx,sy,scx,scy,cx,cy); + } else if (numberOfContours < 0) { + // Compound shapes. 
+ int more = 1; + stbtt_uint8 *comp = data + g + 10; + num_vertices = 0; + vertices = 0; + while (more) { + stbtt_uint16 flags, gidx; + int comp_num_verts = 0, i; + stbtt_vertex *comp_verts = 0, *tmp = 0; + float mtx[6] = {1,0,0,1,0,0}, m, n; + + flags = ttSHORT(comp); comp+=2; + gidx = ttSHORT(comp); comp+=2; + + if (flags & 2) { // XY values + if (flags & 1) { // shorts + mtx[4] = ttSHORT(comp); comp+=2; + mtx[5] = ttSHORT(comp); comp+=2; + } else { + mtx[4] = ttCHAR(comp); comp+=1; + mtx[5] = ttCHAR(comp); comp+=1; + } + } + else { + // @TODO handle matching point + STBTT_assert(0); + } + if (flags & (1<<3)) { // WE_HAVE_A_SCALE + mtx[0] = mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = mtx[2] = 0; + } else if (flags & (1<<6)) { // WE_HAVE_AN_X_AND_YSCALE + mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = mtx[2] = 0; + mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + } else if (flags & (1<<7)) { // WE_HAVE_A_TWO_BY_TWO + mtx[0] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[1] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[2] = ttSHORT(comp)/16384.0f; comp+=2; + mtx[3] = ttSHORT(comp)/16384.0f; comp+=2; + } + + // Find transformation scales. + m = (float) STBTT_sqrt(mtx[0]*mtx[0] + mtx[1]*mtx[1]); + n = (float) STBTT_sqrt(mtx[2]*mtx[2] + mtx[3]*mtx[3]); + + // Get indexed glyph. + comp_num_verts = stbtt_GetGlyphShape(info, gidx, &comp_verts); + if (comp_num_verts > 0) { + // Transform vertices. + for (i = 0; i < comp_num_verts; ++i) { + stbtt_vertex* v = &comp_verts[i]; + stbtt_vertex_type x,y; + x=v->x; y=v->y; + v->x = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); + v->y = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); + x=v->cx; y=v->cy; + v->cx = (stbtt_vertex_type)(m * (mtx[0]*x + mtx[2]*y + mtx[4])); + v->cy = (stbtt_vertex_type)(n * (mtx[1]*x + mtx[3]*y + mtx[5])); + } + // Append vertices. + tmp = (stbtt_vertex*)STBTT_malloc((num_vertices+comp_num_verts)*sizeof(stbtt_vertex), info->userdata); + if (!tmp) { + if (vertices) STBTT_free(vertices, info->userdata); + if (comp_verts) STBTT_free(comp_verts, info->userdata); + return 0; + } + if (num_vertices > 0) STBTT_memcpy(tmp, vertices, num_vertices*sizeof(stbtt_vertex)); + STBTT_memcpy(tmp+num_vertices, comp_verts, comp_num_verts*sizeof(stbtt_vertex)); + if (vertices) STBTT_free(vertices, info->userdata); + vertices = tmp; + STBTT_free(comp_verts, info->userdata); + num_vertices += comp_num_verts; + } + // More components ? 
+ more = flags & (1<<5); + } + } else { + // numberOfCounters == 0, do nothing + } + + *pvertices = vertices; + return num_vertices; +} + +typedef struct +{ + int bounds; + int started; + float first_x, first_y; + float x, y; + stbtt_int32 min_x, max_x, min_y, max_y; + + stbtt_vertex *pvertices; + int num_vertices; +} stbtt__csctx; + +#define STBTT__CSCTX_INIT(bounds) {bounds,0, 0,0, 0,0, 0,0,0,0, NULL, 0} + +static void stbtt__track_vertex(stbtt__csctx *c, stbtt_int32 x, stbtt_int32 y) +{ + if (x > c->max_x || !c->started) c->max_x = x; + if (y > c->max_y || !c->started) c->max_y = y; + if (x < c->min_x || !c->started) c->min_x = x; + if (y < c->min_y || !c->started) c->min_y = y; + c->started = 1; +} + +static void stbtt__csctx_v(stbtt__csctx *c, stbtt_uint8 type, stbtt_int32 x, stbtt_int32 y, stbtt_int32 cx, stbtt_int32 cy, stbtt_int32 cx1, stbtt_int32 cy1) +{ + if (c->bounds) { + stbtt__track_vertex(c, x, y); + if (type == STBTT_vcubic) { + stbtt__track_vertex(c, cx, cy); + stbtt__track_vertex(c, cx1, cy1); + } + } else { + stbtt_setvertex(&c->pvertices[c->num_vertices], type, x, y, cx, cy); + c->pvertices[c->num_vertices].cx1 = (stbtt_int16) cx1; + c->pvertices[c->num_vertices].cy1 = (stbtt_int16) cy1; + } + c->num_vertices++; +} + +static void stbtt__csctx_close_shape(stbtt__csctx *ctx) +{ + if (ctx->first_x != ctx->x || ctx->first_y != ctx->y) + stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->first_x, (int)ctx->first_y, 0, 0, 0, 0); +} + +static void stbtt__csctx_rmove_to(stbtt__csctx *ctx, float dx, float dy) +{ + stbtt__csctx_close_shape(ctx); + ctx->first_x = ctx->x = ctx->x + dx; + ctx->first_y = ctx->y = ctx->y + dy; + stbtt__csctx_v(ctx, STBTT_vmove, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0); +} + +static void stbtt__csctx_rline_to(stbtt__csctx *ctx, float dx, float dy) +{ + ctx->x += dx; + ctx->y += dy; + stbtt__csctx_v(ctx, STBTT_vline, (int)ctx->x, (int)ctx->y, 0, 0, 0, 0); +} + +static void stbtt__csctx_rccurve_to(stbtt__csctx *ctx, float dx1, float dy1, float dx2, float dy2, float dx3, float dy3) +{ + float cx1 = ctx->x + dx1; + float cy1 = ctx->y + dy1; + float cx2 = cx1 + dx2; + float cy2 = cy1 + dy2; + ctx->x = cx2 + dx3; + ctx->y = cy2 + dy3; + stbtt__csctx_v(ctx, STBTT_vcubic, (int)ctx->x, (int)ctx->y, (int)cx1, (int)cy1, (int)cx2, (int)cy2); +} + +static stbtt__buf stbtt__get_subr(stbtt__buf idx, int n) +{ + int count = stbtt__cff_index_count(&idx); + int bias = 107; + if (count >= 33900) + bias = 32768; + else if (count >= 1240) + bias = 1131; + n += bias; + if (n < 0 || n >= count) + return stbtt__new_buf(NULL, 0); + return stbtt__cff_index_get(idx, n); +} + +static stbtt__buf stbtt__cid_get_glyph_subrs(const stbtt_fontinfo *info, int glyph_index) +{ + stbtt__buf fdselect = info->fdselect; + int nranges, start, end, v, fmt, fdselector = -1, i; + + stbtt__buf_seek(&fdselect, 0); + fmt = stbtt__buf_get8(&fdselect); + if (fmt == 0) { + // untested + stbtt__buf_skip(&fdselect, glyph_index); + fdselector = stbtt__buf_get8(&fdselect); + } else if (fmt == 3) { + nranges = stbtt__buf_get16(&fdselect); + start = stbtt__buf_get16(&fdselect); + for (i = 0; i < nranges; i++) { + v = stbtt__buf_get8(&fdselect); + end = stbtt__buf_get16(&fdselect); + if (glyph_index >= start && glyph_index < end) { + fdselector = v; + break; + } + start = end; + } + } + if (fdselector == -1) stbtt__new_buf(NULL, 0); + return stbtt__get_subrs(info->cff, stbtt__cff_index_get(info->fontdicts, fdselector)); +} + +static int stbtt__run_charstring(const stbtt_fontinfo *info, int glyph_index, stbtt__csctx *c) 
+{ + int in_header = 1, maskbits = 0, subr_stack_height = 0, sp = 0, v, i, b0; + int has_subrs = 0, clear_stack; + float s[48]; + stbtt__buf subr_stack[10], subrs = info->subrs, b; + float f; + +#define STBTT__CSERR(s) (0) + + // this currently ignores the initial width value, which isn't needed if we have hmtx + b = stbtt__cff_index_get(info->charstrings, glyph_index); + while (b.cursor < b.size) { + i = 0; + clear_stack = 1; + b0 = stbtt__buf_get8(&b); + switch (b0) { + // @TODO implement hinting + case 0x13: // hintmask + case 0x14: // cntrmask + if (in_header) + maskbits += (sp / 2); // implicit "vstem" + in_header = 0; + stbtt__buf_skip(&b, (maskbits + 7) / 8); + break; + + case 0x01: // hstem + case 0x03: // vstem + case 0x12: // hstemhm + case 0x17: // vstemhm + maskbits += (sp / 2); + break; + + case 0x15: // rmoveto + in_header = 0; + if (sp < 2) return STBTT__CSERR("rmoveto stack"); + stbtt__csctx_rmove_to(c, s[sp-2], s[sp-1]); + break; + case 0x04: // vmoveto + in_header = 0; + if (sp < 1) return STBTT__CSERR("vmoveto stack"); + stbtt__csctx_rmove_to(c, 0, s[sp-1]); + break; + case 0x16: // hmoveto + in_header = 0; + if (sp < 1) return STBTT__CSERR("hmoveto stack"); + stbtt__csctx_rmove_to(c, s[sp-1], 0); + break; + + case 0x05: // rlineto + if (sp < 2) return STBTT__CSERR("rlineto stack"); + for (; i + 1 < sp; i += 2) + stbtt__csctx_rline_to(c, s[i], s[i+1]); + break; + + // hlineto/vlineto and vhcurveto/hvcurveto alternate horizontal and vertical + // starting from a different place. + + case 0x07: // vlineto + if (sp < 1) return STBTT__CSERR("vlineto stack"); + goto vlineto; + case 0x06: // hlineto + if (sp < 1) return STBTT__CSERR("hlineto stack"); + for (;;) { + if (i >= sp) break; + stbtt__csctx_rline_to(c, s[i], 0); + i++; + vlineto: + if (i >= sp) break; + stbtt__csctx_rline_to(c, 0, s[i]); + i++; + } + break; + + case 0x1F: // hvcurveto + if (sp < 4) return STBTT__CSERR("hvcurveto stack"); + goto hvcurveto; + case 0x1E: // vhcurveto + if (sp < 4) return STBTT__CSERR("vhcurveto stack"); + for (;;) { + if (i + 3 >= sp) break; + stbtt__csctx_rccurve_to(c, 0, s[i], s[i+1], s[i+2], s[i+3], (sp - i == 5) ? s[i + 4] : 0.0f); + i += 4; + hvcurveto: + if (i + 3 >= sp) break; + stbtt__csctx_rccurve_to(c, s[i], 0, s[i+1], s[i+2], (sp - i == 5) ? 
s[i+4] : 0.0f, s[i+3]); + i += 4; + } + break; + + case 0x08: // rrcurveto + if (sp < 6) return STBTT__CSERR("rcurveline stack"); + for (; i + 5 < sp; i += 6) + stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); + break; + + case 0x18: // rcurveline + if (sp < 8) return STBTT__CSERR("rcurveline stack"); + for (; i + 5 < sp - 2; i += 6) + stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); + if (i + 1 >= sp) return STBTT__CSERR("rcurveline stack"); + stbtt__csctx_rline_to(c, s[i], s[i+1]); + break; + + case 0x19: // rlinecurve + if (sp < 8) return STBTT__CSERR("rlinecurve stack"); + for (; i + 1 < sp - 6; i += 2) + stbtt__csctx_rline_to(c, s[i], s[i+1]); + if (i + 5 >= sp) return STBTT__CSERR("rlinecurve stack"); + stbtt__csctx_rccurve_to(c, s[i], s[i+1], s[i+2], s[i+3], s[i+4], s[i+5]); + break; + + case 0x1A: // vvcurveto + case 0x1B: // hhcurveto + if (sp < 4) return STBTT__CSERR("(vv|hh)curveto stack"); + f = 0.0; + if (sp & 1) { f = s[i]; i++; } + for (; i + 3 < sp; i += 4) { + if (b0 == 0x1B) + stbtt__csctx_rccurve_to(c, s[i], f, s[i+1], s[i+2], s[i+3], 0.0); + else + stbtt__csctx_rccurve_to(c, f, s[i], s[i+1], s[i+2], 0.0, s[i+3]); + f = 0.0; + } + break; + + case 0x0A: // callsubr + if (!has_subrs) { + if (info->fdselect.size) + subrs = stbtt__cid_get_glyph_subrs(info, glyph_index); + has_subrs = 1; + } + // fallthrough + case 0x1D: // callgsubr + if (sp < 1) return STBTT__CSERR("call(g|)subr stack"); + v = (int) s[--sp]; + if (subr_stack_height >= 10) return STBTT__CSERR("recursion limit"); + subr_stack[subr_stack_height++] = b; + b = stbtt__get_subr(b0 == 0x0A ? subrs : info->gsubrs, v); + if (b.size == 0) return STBTT__CSERR("subr not found"); + b.cursor = 0; + clear_stack = 0; + break; + + case 0x0B: // return + if (subr_stack_height <= 0) return STBTT__CSERR("return outside subr"); + b = subr_stack[--subr_stack_height]; + clear_stack = 0; + break; + + case 0x0E: // endchar + stbtt__csctx_close_shape(c); + return 1; + + case 0x0C: { // two-byte escape + float dx1, dx2, dx3, dx4, dx5, dx6, dy1, dy2, dy3, dy4, dy5, dy6; + float dx, dy; + int b1 = stbtt__buf_get8(&b); + switch (b1) { + // @TODO These "flex" implementations ignore the flex-depth and resolution, + // and always draw beziers. 
+ case 0x22: // hflex + if (sp < 7) return STBTT__CSERR("hflex stack"); + dx1 = s[0]; + dx2 = s[1]; + dy2 = s[2]; + dx3 = s[3]; + dx4 = s[4]; + dx5 = s[5]; + dx6 = s[6]; + stbtt__csctx_rccurve_to(c, dx1, 0, dx2, dy2, dx3, 0); + stbtt__csctx_rccurve_to(c, dx4, 0, dx5, -dy2, dx6, 0); + break; + + case 0x23: // flex + if (sp < 13) return STBTT__CSERR("flex stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dy3 = s[5]; + dx4 = s[6]; + dy4 = s[7]; + dx5 = s[8]; + dy5 = s[9]; + dx6 = s[10]; + dy6 = s[11]; + //fd is s[12] + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); + stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); + break; + + case 0x24: // hflex1 + if (sp < 9) return STBTT__CSERR("hflex1 stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dx4 = s[5]; + dx5 = s[6]; + dy5 = s[7]; + dx6 = s[8]; + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, 0); + stbtt__csctx_rccurve_to(c, dx4, 0, dx5, dy5, dx6, -(dy1+dy2+dy5)); + break; + + case 0x25: // flex1 + if (sp < 11) return STBTT__CSERR("flex1 stack"); + dx1 = s[0]; + dy1 = s[1]; + dx2 = s[2]; + dy2 = s[3]; + dx3 = s[4]; + dy3 = s[5]; + dx4 = s[6]; + dy4 = s[7]; + dx5 = s[8]; + dy5 = s[9]; + dx6 = dy6 = s[10]; + dx = dx1+dx2+dx3+dx4+dx5; + dy = dy1+dy2+dy3+dy4+dy5; + if (STBTT_fabs(dx) > STBTT_fabs(dy)) + dy6 = -dy; + else + dx6 = -dx; + stbtt__csctx_rccurve_to(c, dx1, dy1, dx2, dy2, dx3, dy3); + stbtt__csctx_rccurve_to(c, dx4, dy4, dx5, dy5, dx6, dy6); + break; + + default: + return STBTT__CSERR("unimplemented"); + } + } break; + + default: + if (b0 != 255 && b0 != 28 && (b0 < 32 || b0 > 254)) + return STBTT__CSERR("reserved operator"); + + // push immediate + if (b0 == 255) { + f = (float)(stbtt_int32)stbtt__buf_get32(&b) / 0x10000; + } else { + stbtt__buf_skip(&b, -1); + f = (float)(stbtt_int16)stbtt__cff_int(&b); + } + if (sp >= 48) return STBTT__CSERR("push stack overflow"); + s[sp++] = f; + clear_stack = 0; + break; + } + if (clear_stack) sp = 0; + } + return STBTT__CSERR("no endchar"); + +#undef STBTT__CSERR +} + +static int stbtt__GetGlyphShapeT2(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + // runs the charstring twice, once to count and once to output (to avoid realloc) + stbtt__csctx count_ctx = STBTT__CSCTX_INIT(1); + stbtt__csctx output_ctx = STBTT__CSCTX_INIT(0); + if (stbtt__run_charstring(info, glyph_index, &count_ctx)) { + *pvertices = (stbtt_vertex*)STBTT_malloc(count_ctx.num_vertices*sizeof(stbtt_vertex), info->userdata); + output_ctx.pvertices = *pvertices; + if (stbtt__run_charstring(info, glyph_index, &output_ctx)) { + STBTT_assert(output_ctx.num_vertices == count_ctx.num_vertices); + return output_ctx.num_vertices; + } + } + *pvertices = NULL; + return 0; +} + +static int stbtt__GetGlyphInfoT2(const stbtt_fontinfo *info, int glyph_index, int *x0, int *y0, int *x1, int *y1) +{ + stbtt__csctx c = STBTT__CSCTX_INIT(1); + int r = stbtt__run_charstring(info, glyph_index, &c); + if (x0) *x0 = r ? c.min_x : 0; + if (y0) *y0 = r ? c.min_y : 0; + if (x1) *x1 = r ? c.max_x : 0; + if (y1) *y1 = r ? c.max_y : 0; + return r ? 
c.num_vertices : 0; +} + +STBTT_DEF int stbtt_GetGlyphShape(const stbtt_fontinfo *info, int glyph_index, stbtt_vertex **pvertices) +{ + if (!info->cff.size) + return stbtt__GetGlyphShapeTT(info, glyph_index, pvertices); + else + return stbtt__GetGlyphShapeT2(info, glyph_index, pvertices); +} + +STBTT_DEF void stbtt_GetGlyphHMetrics(const stbtt_fontinfo *info, int glyph_index, int *advanceWidth, int *leftSideBearing) +{ + stbtt_uint16 numOfLongHorMetrics = ttUSHORT(info->data+info->hhea + 34); + if (glyph_index < numOfLongHorMetrics) { + if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*glyph_index); + if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*glyph_index + 2); + } else { + if (advanceWidth) *advanceWidth = ttSHORT(info->data + info->hmtx + 4*(numOfLongHorMetrics-1)); + if (leftSideBearing) *leftSideBearing = ttSHORT(info->data + info->hmtx + 4*numOfLongHorMetrics + 2*(glyph_index - numOfLongHorMetrics)); + } +} + +STBTT_DEF int stbtt_GetKerningTableLength(const stbtt_fontinfo *info) +{ + stbtt_uint8 *data = info->data + info->kern; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + return ttUSHORT(data+10); +} + +STBTT_DEF int stbtt_GetKerningTable(const stbtt_fontinfo *info, stbtt_kerningentry* table, int table_length) +{ + stbtt_uint8 *data = info->data + info->kern; + int k, length; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + length = ttUSHORT(data+10); + if (table_length < length) + length = table_length; + + for (k = 0; k < length; k++) + { + table[k].glyph1 = ttUSHORT(data+18+(k*6)); + table[k].glyph2 = ttUSHORT(data+20+(k*6)); + table[k].advance = ttSHORT(data+22+(k*6)); + } + + return length; +} + +static int stbtt__GetGlyphKernInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2) +{ + stbtt_uint8 *data = info->data + info->kern; + stbtt_uint32 needle, straw; + int l, r, m; + + // we only look at the first table. it must be 'horizontal' and format 0. + if (!info->kern) + return 0; + if (ttUSHORT(data+2) < 1) // number of tables, need at least 1 + return 0; + if (ttUSHORT(data+8) != 1) // horizontal flag must be set in format + return 0; + + l = 0; + r = ttUSHORT(data+10) - 1; + needle = glyph1 << 16 | glyph2; + while (l <= r) { + m = (l + r) >> 1; + straw = ttULONG(data+18+(m*6)); // note: unaligned read + if (needle < straw) + r = m - 1; + else if (needle > straw) + l = m + 1; + else + return ttSHORT(data+22+(m*6)); + } + return 0; +} + +static stbtt_int32 stbtt__GetCoverageIndex(stbtt_uint8 *coverageTable, int glyph) +{ + stbtt_uint16 coverageFormat = ttUSHORT(coverageTable); + switch(coverageFormat) { + case 1: { + stbtt_uint16 glyphCount = ttUSHORT(coverageTable + 2); + + // Binary search. 
+ stbtt_int32 l=0, r=glyphCount-1, m; + int straw, needle=glyph; + while (l <= r) { + stbtt_uint8 *glyphArray = coverageTable + 4; + stbtt_uint16 glyphID; + m = (l + r) >> 1; + glyphID = ttUSHORT(glyphArray + 2 * m); + straw = glyphID; + if (needle < straw) + r = m - 1; + else if (needle > straw) + l = m + 1; + else { + return m; + } + } + } break; + + case 2: { + stbtt_uint16 rangeCount = ttUSHORT(coverageTable + 2); + stbtt_uint8 *rangeArray = coverageTable + 4; + + // Binary search. + stbtt_int32 l=0, r=rangeCount-1, m; + int strawStart, strawEnd, needle=glyph; + while (l <= r) { + stbtt_uint8 *rangeRecord; + m = (l + r) >> 1; + rangeRecord = rangeArray + 6 * m; + strawStart = ttUSHORT(rangeRecord); + strawEnd = ttUSHORT(rangeRecord + 2); + if (needle < strawStart) + r = m - 1; + else if (needle > strawEnd) + l = m + 1; + else { + stbtt_uint16 startCoverageIndex = ttUSHORT(rangeRecord + 4); + return startCoverageIndex + glyph - strawStart; + } + } + } break; + + default: { + // There are no other cases. + STBTT_assert(0); + } break; + } + + return -1; +} + +static stbtt_int32 stbtt__GetGlyphClass(stbtt_uint8 *classDefTable, int glyph) +{ + stbtt_uint16 classDefFormat = ttUSHORT(classDefTable); + switch(classDefFormat) + { + case 1: { + stbtt_uint16 startGlyphID = ttUSHORT(classDefTable + 2); + stbtt_uint16 glyphCount = ttUSHORT(classDefTable + 4); + stbtt_uint8 *classDef1ValueArray = classDefTable + 6; + + if (glyph >= startGlyphID && glyph < startGlyphID + glyphCount) + return (stbtt_int32)ttUSHORT(classDef1ValueArray + 2 * (glyph - startGlyphID)); + + classDefTable = classDef1ValueArray + 2 * glyphCount; + } break; + + case 2: { + stbtt_uint16 classRangeCount = ttUSHORT(classDefTable + 2); + stbtt_uint8 *classRangeRecords = classDefTable + 4; + + // Binary search. + stbtt_int32 l=0, r=classRangeCount-1, m; + int strawStart, strawEnd, needle=glyph; + while (l <= r) { + stbtt_uint8 *classRangeRecord; + m = (l + r) >> 1; + classRangeRecord = classRangeRecords + 6 * m; + strawStart = ttUSHORT(classRangeRecord); + strawEnd = ttUSHORT(classRangeRecord + 2); + if (needle < strawStart) + r = m - 1; + else if (needle > strawEnd) + l = m + 1; + else + return (stbtt_int32)ttUSHORT(classRangeRecord + 4); + } + + classDefTable = classRangeRecords + 6 * classRangeCount; + } break; + + default: { + // There are no other cases. + STBTT_assert(0); + } break; + } + + return -1; +} + +// Define to STBTT_assert(x) if you want to break on unimplemented formats. 
+#define STBTT_GPOS_TODO_assert(x)
+
+static stbtt_int32 stbtt__GetGlyphGPOSInfoAdvance(const stbtt_fontinfo *info, int glyph1, int glyph2)
+{
+   stbtt_uint16 lookupListOffset;
+   stbtt_uint8 *lookupList;
+   stbtt_uint16 lookupCount;
+   stbtt_uint8 *data;
+   stbtt_int32 i;
+
+   if (!info->gpos) return 0;
+
+   data = info->data + info->gpos;
+
+   if (ttUSHORT(data+0) != 1) return 0; // Major version 1
+   if (ttUSHORT(data+2) != 0) return 0; // Minor version 0
+
+   lookupListOffset = ttUSHORT(data+8);
+   lookupList = data + lookupListOffset;
+   lookupCount = ttUSHORT(lookupList);
+
+   for (i=0; i<lookupCount; ++i) {
+      stbtt_uint16 lookupOffset = ttUSHORT(lookupList + 2 + 2 * i);
+      stbtt_uint8 *lookupTable = lookupList + lookupOffset;
+
+      stbtt_uint16 lookupType = ttUSHORT(lookupTable);
+      stbtt_uint16 subTableCount = ttUSHORT(lookupTable + 4);
+      stbtt_uint8 *subTableOffsets = lookupTable + 6;
+      switch(lookupType) {
+         case 2: { // Pair Adjustment Positioning Subtable
+            stbtt_int32 sti;
+            for (sti=0; sti<subTableCount; sti++) {
+               stbtt_uint16 subtableOffset = ttUSHORT(subTableOffsets + 2 * sti);
+               stbtt_uint8 *table = lookupTable + subtableOffset;
+               stbtt_uint16 posFormat = ttUSHORT(table);
+               stbtt_uint16 coverageOffset = ttUSHORT(table + 2);
+               stbtt_int32 coverageIndex = stbtt__GetCoverageIndex(table + coverageOffset, glyph1);
+               if (coverageIndex == -1) continue;
+
+               switch (posFormat) {
+                  case 1: {
+                     stbtt_int32 l, r, m;
+                     int straw, needle;
+                     stbtt_uint16 valueFormat1 = ttUSHORT(table + 4);
+                     stbtt_uint16 valueFormat2 = ttUSHORT(table + 6);
+                     stbtt_int32 valueRecordPairSizeInBytes = 2;
+                     stbtt_uint16 pairSetCount = ttUSHORT(table + 8);
+                     stbtt_uint16 pairPosOffset = ttUSHORT(table + 10 + 2 * coverageIndex);
+                     stbtt_uint8 *pairValueTable = table + pairPosOffset;
+                     stbtt_uint16 pairValueCount = ttUSHORT(pairValueTable);
+                     stbtt_uint8 *pairValueArray = pairValueTable + 2;
+
+                     // TODO: Support more formats.
+                     STBTT_GPOS_TODO_assert(valueFormat1 == 4);
+                     if (valueFormat1 != 4) return 0;
+                     STBTT_GPOS_TODO_assert(valueFormat2 == 0);
+                     if (valueFormat2 != 0) return 0;
+
+                     STBTT_assert(coverageIndex < pairSetCount);
+                     STBTT__NOTUSED(pairSetCount);
+
+                     needle = glyph2;
+                     r = pairValueCount - 1;
+                     l = 0;
+
+                     // Binary search.
+                     while (l <= r) {
+                        stbtt_uint16 secondGlyph;
+                        stbtt_uint8 *pairValue;
+                        m = (l + r) >> 1;
+                        pairValue = pairValueArray + (2 + valueRecordPairSizeInBytes) * m;
+                        secondGlyph = ttUSHORT(pairValue);
+                        straw = secondGlyph;
+                        if (needle < straw)
+                           r = m - 1;
+                        else if (needle > straw)
+                           l = m + 1;
+                        else {
+                           stbtt_int16 xAdvance = ttSHORT(pairValue + 2);
+                           return xAdvance;
+                        }
+                     }
+                  } break;
+
+                  case 2: {
+                     stbtt_uint16 valueFormat1 = ttUSHORT(table + 4);
+                     stbtt_uint16 valueFormat2 = ttUSHORT(table + 6);
+
+                     stbtt_uint16 classDef1Offset = ttUSHORT(table + 8);
+                     stbtt_uint16 classDef2Offset = ttUSHORT(table + 10);
+                     int glyph1class = stbtt__GetGlyphClass(table + classDef1Offset, glyph1);
+                     int glyph2class = stbtt__GetGlyphClass(table + classDef2Offset, glyph2);
+
+                     stbtt_uint16 class1Count = ttUSHORT(table + 12);
+                     stbtt_uint16 class2Count = ttUSHORT(table + 14);
+                     STBTT_assert(glyph1class < class1Count);
+                     STBTT_assert(glyph2class < class2Count);
+
+                     // TODO: Support more formats.
+                     STBTT_GPOS_TODO_assert(valueFormat1 == 4);
+                     if (valueFormat1 != 4) return 0;
+                     STBTT_GPOS_TODO_assert(valueFormat2 == 0);
+                     if (valueFormat2 != 0) return 0;
+
+                     if (glyph1class >= 0 && glyph1class < class1Count && glyph2class >= 0 && glyph2class < class2Count) {
+                        stbtt_uint8 *class1Records = table + 16;
+                        stbtt_uint8 *class2Records = class1Records + 2 * (glyph1class * class2Count);
+                        stbtt_int16 xAdvance = ttSHORT(class2Records + 2 * glyph2class);
+                        return xAdvance;
+                     }
+                  } break;
+
+                  default: {
+                     // There are no other cases.
+                     STBTT_assert(0);
+                     break;
+                  };
+               }
+            }
+            break;
+         };
+
+         default:
+            // TODO: Implement other stuff.
+            break;
+      }
+   }
+
+   return 0;
+}
+
+STBTT_DEF int stbtt_GetGlyphKernAdvance(const stbtt_fontinfo *info, int g1, int g2)
+{
+   int xAdvance = 0;
+
+   if (info->gpos)
+      xAdvance += stbtt__GetGlyphGPOSInfoAdvance(info, g1, g2);
+   else if (info->kern)
+      xAdvance += stbtt__GetGlyphKernInfoAdvance(info, g1, g2);
+
+   return xAdvance;
+}
+
+STBTT_DEF int stbtt_GetCodepointKernAdvance(const stbtt_fontinfo *info, int ch1, int ch2)
+{
+   if (!info->kern && !info->gpos) // if no kerning table, don't waste time looking up both codepoint->glyphs
+      return 0;
+   return stbtt_GetGlyphKernAdvance(info, stbtt_FindGlyphIndex(info,ch1), stbtt_FindGlyphIndex(info,ch2));
+}
+
+STBTT_DEF void stbtt_GetCodepointHMetrics(const stbtt_fontinfo *info, int codepoint, int *advanceWidth, int *leftSideBearing)
+{
+   stbtt_GetGlyphHMetrics(info, stbtt_FindGlyphIndex(info,codepoint), advanceWidth, leftSideBearing);
+}
+
+STBTT_DEF void stbtt_GetFontVMetrics(const stbtt_fontinfo *info, int *ascent, int *descent, int *lineGap)
+{
+   if (ascent ) *ascent  = ttSHORT(info->data+info->hhea + 4);
+   if (descent) *descent = ttSHORT(info->data+info->hhea + 6);
+   if (lineGap) *lineGap = ttSHORT(info->data+info->hhea + 8);
+}
+
+STBTT_DEF int stbtt_GetFontVMetricsOS2(const stbtt_fontinfo *info, int *typoAscent, int *typoDescent, int *typoLineGap)
+{
+   int tab = stbtt__find_table(info->data, info->fontstart, "OS/2");
+   if (!tab)
+      return 0;
+   if (typoAscent ) *typoAscent  = ttSHORT(info->data+tab + 68);
+   if (typoDescent) *typoDescent = ttSHORT(info->data+tab + 70);
+   if (typoLineGap) *typoLineGap = ttSHORT(info->data+tab + 72);
+   return 1;
+}
+
+STBTT_DEF void stbtt_GetFontBoundingBox(const stbtt_fontinfo *info, int *x0, int *y0, int *x1, int *y1)
+{
+   *x0 = ttSHORT(info->data + info->head + 36);
+   *y0 = ttSHORT(info->data + info->head + 38);
+   *x1 = ttSHORT(info->data + info->head + 40);
+   *y1 = ttSHORT(info->data + info->head + 42);
+}
+
+STBTT_DEF float stbtt_ScaleForPixelHeight(const stbtt_fontinfo *info, float height)
+{
+   int fheight = ttSHORT(info->data + info->hhea + 4) - ttSHORT(info->data + info->hhea + 6);
+   return (float) height / fheight;
+}
+
+STBTT_DEF float stbtt_ScaleForMappingEmToPixels(const stbtt_fontinfo *info, float pixels)
+{
+   int unitsPerEm = ttUSHORT(info->data + info->head + 18);
+   return pixels / unitsPerEm;
+}
+
+STBTT_DEF void stbtt_FreeShape(const stbtt_fontinfo *info, stbtt_vertex *v)
+{
+   STBTT_free(v, info->userdata);
+}
+
+STBTT_DEF stbtt_uint8 *stbtt_FindSVGDoc(const stbtt_fontinfo *info, int gl)
+{
+   int i;
+   stbtt_uint8 *data = info->data;
+   stbtt_uint8 *svg_doc_list = data + stbtt__get_svg((stbtt_fontinfo *) info);
+
+   int numEntries = ttUSHORT(svg_doc_list);
+   stbtt_uint8 *svg_docs = svg_doc_list + 2;
+
+   for(i=0; i<numEntries; i++) {
+      stbtt_uint8 *svg_doc = svg_docs + (12 * i);
+      if ((gl >= ttUSHORT(svg_doc)) && (gl <= ttUSHORT(svg_doc + 2)))
+         return svg_doc;
+   }
+   return 0;
+}
+
+STBTT_DEF int stbtt_GetGlyphSVG(const stbtt_fontinfo *info, int gl, const char **svg)
+{
+   stbtt_uint8 *data = info->data;
+   stbtt_uint8 *svg_doc;
+
+   if (info->svg == 0)
+      return 0;
+
+   svg_doc = stbtt_FindSVGDoc(info, gl);
+   if (svg_doc != NULL) {
+      *svg = (char *) data + info->svg + ttULONG(svg_doc + 4);
+      return ttULONG(svg_doc + 8);
+   } else {
+      return 0;
+   }
+}
+
+STBTT_DEF int stbtt_GetCodepointSVG(const stbtt_fontinfo *info, int unicode_codepoint, const char **svg)
+{
+   return stbtt_GetGlyphSVG(info, stbtt_FindGlyphIndex(info, unicode_codepoint), svg);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// antialiasing software rasterizer
+//
+
+STBTT_DEF void stbtt_GetGlyphBitmapBoxSubpixel(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y,float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + int x0=0,y0=0,x1,y1; // =0 suppresses compiler warning + if (!stbtt_GetGlyphBox(font, glyph, &x0,&y0,&x1,&y1)) { + // e.g. space character + if (ix0) *ix0 = 0; + if (iy0) *iy0 = 0; + if (ix1) *ix1 = 0; + if (iy1) *iy1 = 0; + } else { + // move to integral bboxes (treating pixels as little squares, what pixels get touched)? + if (ix0) *ix0 = STBTT_ifloor( x0 * scale_x + shift_x); + if (iy0) *iy0 = STBTT_ifloor(-y1 * scale_y + shift_y); + if (ix1) *ix1 = STBTT_iceil ( x1 * scale_x + shift_x); + if (iy1) *iy1 = STBTT_iceil (-y0 * scale_y + shift_y); + } +} + +STBTT_DEF void stbtt_GetGlyphBitmapBox(const stbtt_fontinfo *font, int glyph, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetGlyphBitmapBoxSubpixel(font, glyph, scale_x, scale_y,0.0f,0.0f, ix0, iy0, ix1, iy1); +} + +STBTT_DEF void stbtt_GetCodepointBitmapBoxSubpixel(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, float shift_x, float shift_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetGlyphBitmapBoxSubpixel(font, stbtt_FindGlyphIndex(font,codepoint), scale_x, scale_y,shift_x,shift_y, ix0,iy0,ix1,iy1); +} + +STBTT_DEF void stbtt_GetCodepointBitmapBox(const stbtt_fontinfo *font, int codepoint, float scale_x, float scale_y, int *ix0, int *iy0, int *ix1, int *iy1) +{ + stbtt_GetCodepointBitmapBoxSubpixel(font, codepoint, scale_x, scale_y,0.0f,0.0f, ix0,iy0,ix1,iy1); +} + +////////////////////////////////////////////////////////////////////////////// +// +// Rasterizer + +typedef struct stbtt__hheap_chunk +{ + struct stbtt__hheap_chunk *next; +} stbtt__hheap_chunk; + +typedef struct stbtt__hheap +{ + struct stbtt__hheap_chunk *head; + void *first_free; + int num_remaining_in_head_chunk; +} stbtt__hheap; + +static void *stbtt__hheap_alloc(stbtt__hheap *hh, size_t size, void *userdata) +{ + if (hh->first_free) { + void *p = hh->first_free; + hh->first_free = * (void **) p; + return p; + } else { + if (hh->num_remaining_in_head_chunk == 0) { + int count = (size < 32 ? 2000 : size < 128 ? 
800 : 100); + stbtt__hheap_chunk *c = (stbtt__hheap_chunk *) STBTT_malloc(sizeof(stbtt__hheap_chunk) + size * count, userdata); + if (c == NULL) + return NULL; + c->next = hh->head; + hh->head = c; + hh->num_remaining_in_head_chunk = count; + } + --hh->num_remaining_in_head_chunk; + return (char *) (hh->head) + sizeof(stbtt__hheap_chunk) + size * hh->num_remaining_in_head_chunk; + } +} + +static void stbtt__hheap_free(stbtt__hheap *hh, void *p) +{ + *(void **) p = hh->first_free; + hh->first_free = p; +} + +static void stbtt__hheap_cleanup(stbtt__hheap *hh, void *userdata) +{ + stbtt__hheap_chunk *c = hh->head; + while (c) { + stbtt__hheap_chunk *n = c->next; + STBTT_free(c, userdata); + c = n; + } +} + +typedef struct stbtt__edge { + float x0,y0, x1,y1; + int invert; +} stbtt__edge; + + +typedef struct stbtt__active_edge +{ + struct stbtt__active_edge *next; + #if STBTT_RASTERIZER_VERSION==1 + int x,dx; + float ey; + int direction; + #elif STBTT_RASTERIZER_VERSION==2 + float fx,fdx,fdy; + float direction; + float sy; + float ey; + #else + #error "Unrecognized value of STBTT_RASTERIZER_VERSION" + #endif +} stbtt__active_edge; + +#if STBTT_RASTERIZER_VERSION == 1 +#define STBTT_FIXSHIFT 10 +#define STBTT_FIX (1 << STBTT_FIXSHIFT) +#define STBTT_FIXMASK (STBTT_FIX-1) + +static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata) +{ + stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata); + float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); + STBTT_assert(z != NULL); + if (!z) return z; + + // round dx down to avoid overshooting + if (dxdy < 0) + z->dx = -STBTT_ifloor(STBTT_FIX * -dxdy); + else + z->dx = STBTT_ifloor(STBTT_FIX * dxdy); + + z->x = STBTT_ifloor(STBTT_FIX * e->x0 + z->dx * (start_point - e->y0)); // use z->dx so when we offset later it's by the same amount + z->x -= off_x * STBTT_FIX; + + z->ey = e->y1; + z->next = 0; + z->direction = e->invert ? 1 : -1; + return z; +} +#elif STBTT_RASTERIZER_VERSION == 2 +static stbtt__active_edge *stbtt__new_active(stbtt__hheap *hh, stbtt__edge *e, int off_x, float start_point, void *userdata) +{ + stbtt__active_edge *z = (stbtt__active_edge *) stbtt__hheap_alloc(hh, sizeof(*z), userdata); + float dxdy = (e->x1 - e->x0) / (e->y1 - e->y0); + STBTT_assert(z != NULL); + //STBTT_assert(e->y0 <= start_point); + if (!z) return z; + z->fdx = dxdy; + z->fdy = dxdy != 0.0f ? (1.0f/dxdy) : 0.0f; + z->fx = e->x0 + dxdy * (start_point - e->y0); + z->fx -= off_x; + z->direction = e->invert ? 1.0f : -1.0f; + z->sy = e->y0; + z->ey = e->y1; + z->next = 0; + return z; +} +#else +#error "Unrecognized value of STBTT_RASTERIZER_VERSION" +#endif + +#if STBTT_RASTERIZER_VERSION == 1 +// note: this routine clips fills that extend off the edges... 
ideally this +// wouldn't happen, but it could happen if the truetype glyph bounding boxes +// are wrong, or if the user supplies a too-small bitmap +static void stbtt__fill_active_edges(unsigned char *scanline, int len, stbtt__active_edge *e, int max_weight) +{ + // non-zero winding fill + int x0=0, w=0; + + while (e) { + if (w == 0) { + // if we're currently at zero, we need to record the edge start point + x0 = e->x; w += e->direction; + } else { + int x1 = e->x; w += e->direction; + // if we went to zero, we need to draw + if (w == 0) { + int i = x0 >> STBTT_FIXSHIFT; + int j = x1 >> STBTT_FIXSHIFT; + + if (i < len && j >= 0) { + if (i == j) { + // x0,x1 are the same pixel, so compute combined coverage + scanline[i] = scanline[i] + (stbtt_uint8) ((x1 - x0) * max_weight >> STBTT_FIXSHIFT); + } else { + if (i >= 0) // add antialiasing for x0 + scanline[i] = scanline[i] + (stbtt_uint8) (((STBTT_FIX - (x0 & STBTT_FIXMASK)) * max_weight) >> STBTT_FIXSHIFT); + else + i = -1; // clip + + if (j < len) // add antialiasing for x1 + scanline[j] = scanline[j] + (stbtt_uint8) (((x1 & STBTT_FIXMASK) * max_weight) >> STBTT_FIXSHIFT); + else + j = len; // clip + + for (++i; i < j; ++i) // fill pixels between x0 and x1 + scanline[i] = scanline[i] + (stbtt_uint8) max_weight; + } + } + } + } + + e = e->next; + } +} + +static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata) +{ + stbtt__hheap hh = { 0, 0, 0 }; + stbtt__active_edge *active = NULL; + int y,j=0; + int max_weight = (255 / vsubsample); // weight per vertical scanline + int s; // vertical subsample index + unsigned char scanline_data[512], *scanline; + + if (result->w > 512) + scanline = (unsigned char *) STBTT_malloc(result->w, userdata); + else + scanline = scanline_data; + + y = off_y * vsubsample; + e[n].y0 = (off_y + result->h) * (float) vsubsample + 1; + + while (j < result->h) { + STBTT_memset(scanline, 0, result->w); + for (s=0; s < vsubsample; ++s) { + // find center of pixel for this scanline + float scan_y = y + 0.5f; + stbtt__active_edge **step = &active; + + // update all active edges; + // remove all active edges that terminate before the center of this scanline + while (*step) { + stbtt__active_edge * z = *step; + if (z->ey <= scan_y) { + *step = z->next; // delete from list + STBTT_assert(z->direction); + z->direction = 0; + stbtt__hheap_free(&hh, z); + } else { + z->x += z->dx; // advance to position for current scanline + step = &((*step)->next); // advance through list + } + } + + // resort the list if needed + for(;;) { + int changed=0; + step = &active; + while (*step && (*step)->next) { + if ((*step)->x > (*step)->next->x) { + stbtt__active_edge *t = *step; + stbtt__active_edge *q = t->next; + + t->next = q->next; + q->next = t; + *step = q; + changed = 1; + } + step = &(*step)->next; + } + if (!changed) break; + } + + // insert all edges that start before the center of this scanline -- omit ones that also end on this scanline + while (e->y0 <= scan_y) { + if (e->y1 > scan_y) { + stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y, userdata); + if (z != NULL) { + // find insertion point + if (active == NULL) + active = z; + else if (z->x < active->x) { + // insert at front + z->next = active; + active = z; + } else { + // find thing to insert AFTER + stbtt__active_edge *p = active; + while (p->next && p->next->x < z->x) + p = p->next; + // at this point, p->next->x is NOT < z->x + z->next = p->next; + p->next = z; + } + } + } + 
++e; + } + + // now process all active edges in XOR fashion + if (active) + stbtt__fill_active_edges(scanline, result->w, active, max_weight); + + ++y; + } + STBTT_memcpy(result->pixels + j * result->stride, scanline, result->w); + ++j; + } + + stbtt__hheap_cleanup(&hh, userdata); + + if (scanline != scanline_data) + STBTT_free(scanline, userdata); +} + +#elif STBTT_RASTERIZER_VERSION == 2 + +// the edge passed in here does not cross the vertical line at x or the vertical line at x+1 +// (i.e. it has already been clipped to those) +static void stbtt__handle_clipped_edge(float *scanline, int x, stbtt__active_edge *e, float x0, float y0, float x1, float y1) +{ + if (y0 == y1) return; + STBTT_assert(y0 < y1); + STBTT_assert(e->sy <= e->ey); + if (y0 > e->ey) return; + if (y1 < e->sy) return; + if (y0 < e->sy) { + x0 += (x1-x0) * (e->sy - y0) / (y1-y0); + y0 = e->sy; + } + if (y1 > e->ey) { + x1 += (x1-x0) * (e->ey - y1) / (y1-y0); + y1 = e->ey; + } + + if (x0 == x) + STBTT_assert(x1 <= x+1); + else if (x0 == x+1) + STBTT_assert(x1 >= x); + else if (x0 <= x) + STBTT_assert(x1 <= x); + else if (x0 >= x+1) + STBTT_assert(x1 >= x+1); + else + STBTT_assert(x1 >= x && x1 <= x+1); + + if (x0 <= x && x1 <= x) + scanline[x] += e->direction * (y1-y0); + else if (x0 >= x+1 && x1 >= x+1) + ; + else { + STBTT_assert(x0 >= x && x0 <= x+1 && x1 >= x && x1 <= x+1); + scanline[x] += e->direction * (y1-y0) * (1-((x0-x)+(x1-x))/2); // coverage = 1 - average x position + } +} + +static void stbtt__fill_active_edges_new(float *scanline, float *scanline_fill, int len, stbtt__active_edge *e, float y_top) +{ + float y_bottom = y_top+1; + + while (e) { + // brute force every pixel + + // compute intersection points with top & bottom + STBTT_assert(e->ey >= y_top); + + if (e->fdx == 0) { + float x0 = e->fx; + if (x0 < len) { + if (x0 >= 0) { + stbtt__handle_clipped_edge(scanline,(int) x0,e, x0,y_top, x0,y_bottom); + stbtt__handle_clipped_edge(scanline_fill-1,(int) x0+1,e, x0,y_top, x0,y_bottom); + } else { + stbtt__handle_clipped_edge(scanline_fill-1,0,e, x0,y_top, x0,y_bottom); + } + } + } else { + float x0 = e->fx; + float dx = e->fdx; + float xb = x0 + dx; + float x_top, x_bottom; + float sy0,sy1; + float dy = e->fdy; + STBTT_assert(e->sy <= y_bottom && e->ey >= y_top); + + // compute endpoints of line segment clipped to this scanline (if the + // line segment starts on this scanline. x0 is the intersection of the + // line with y_top, but that may be off the line segment. 
+ if (e->sy > y_top) { + x_top = x0 + dx * (e->sy - y_top); + sy0 = e->sy; + } else { + x_top = x0; + sy0 = y_top; + } + if (e->ey < y_bottom) { + x_bottom = x0 + dx * (e->ey - y_top); + sy1 = e->ey; + } else { + x_bottom = xb; + sy1 = y_bottom; + } + + if (x_top >= 0 && x_bottom >= 0 && x_top < len && x_bottom < len) { + // from here on, we don't have to range check x values + + if ((int) x_top == (int) x_bottom) { + float height; + // simple case, only spans one pixel + int x = (int) x_top; + height = sy1 - sy0; + STBTT_assert(x >= 0 && x < len); + scanline[x] += e->direction * (1-((x_top - x) + (x_bottom-x))/2) * height; + scanline_fill[x] += e->direction * height; // everything right of this pixel is filled + } else { + int x,x1,x2; + float y_crossing, step, sign, area; + // covers 2+ pixels + if (x_top > x_bottom) { + // flip scanline vertically; signed area is the same + float t; + sy0 = y_bottom - (sy0 - y_top); + sy1 = y_bottom - (sy1 - y_top); + t = sy0, sy0 = sy1, sy1 = t; + t = x_bottom, x_bottom = x_top, x_top = t; + dx = -dx; + dy = -dy; + t = x0, x0 = xb, xb = t; + } + + x1 = (int) x_top; + x2 = (int) x_bottom; + // compute intersection with y axis at x1+1 + y_crossing = (x1+1 - x0) * dy + y_top; + + sign = e->direction; + // area of the rectangle covered from y0..y_crossing + area = sign * (y_crossing-sy0); + // area of the triangle (x_top,y0), (x+1,y0), (x+1,y_crossing) + scanline[x1] += area * (1-((x_top - x1)+(x1+1-x1))/2); + + step = sign * dy; + for (x = x1+1; x < x2; ++x) { + scanline[x] += area + step/2; + area += step; + } + y_crossing += dy * (x2 - (x1+1)); + + STBTT_assert(STBTT_fabs(area) <= 1.01f); + + scanline[x2] += area + sign * (1-((x2-x2)+(x_bottom-x2))/2) * (sy1-y_crossing); + + scanline_fill[x2] += sign * (sy1-sy0); + } + } else { + // if edge goes outside of box we're drawing, we require + // clipping logic. since this does not match the intended use + // of this library, we use a different, very slow brute + // force implementation + int x; + for (x=0; x < len; ++x) { + // cases: + // + // there can be up to two intersections with the pixel. any intersection + // with left or right edges can be handled by splitting into two (or three) + // regions. intersections with top & bottom do not necessitate case-wise logic. + // + // the old way of doing this found the intersections with the left & right edges, + // then used some simple logic to produce up to three segments in sorted order + // from top-to-bottom. however, this had a problem: if an x edge was epsilon + // across the x border, then the corresponding y position might not be distinct + // from the other y segment, and it might ignored as an empty segment. to avoid + // that, we need to explicitly produce segments based on x positions. 
+ + // rename variables to clearly-defined pairs + float y0 = y_top; + float x1 = (float) (x); + float x2 = (float) (x+1); + float x3 = xb; + float y3 = y_bottom; + + // x = e->x + e->dx * (y-y_top) + // (y-y_top) = (x - e->x) / e->dx + // y = (x - e->x) / e->dx + y_top + float y1 = (x - x0) / dx + y_top; + float y2 = (x+1 - x0) / dx + y_top; + + if (x0 < x1 && x3 > x2) { // three segments descending down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else if (x3 < x1 && x0 > x2) { // three segments descending down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x0 < x1 && x3 > x1) { // two segments across x, down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x3 < x1 && x0 > x1) { // two segments across x, down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x1,y1); + stbtt__handle_clipped_edge(scanline,x,e, x1,y1, x3,y3); + } else if (x0 < x2 && x3 > x2) { // two segments across x+1, down-right + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else if (x3 < x2 && x0 > x2) { // two segments across x+1, down-left + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x2,y2); + stbtt__handle_clipped_edge(scanline,x,e, x2,y2, x3,y3); + } else { // one segment + stbtt__handle_clipped_edge(scanline,x,e, x0,y0, x3,y3); + } + } + } + } + e = e->next; + } +} + +// directly AA rasterize edges w/o supersampling +static void stbtt__rasterize_sorted_edges(stbtt__bitmap *result, stbtt__edge *e, int n, int vsubsample, int off_x, int off_y, void *userdata) +{ + stbtt__hheap hh = { 0, 0, 0 }; + stbtt__active_edge *active = NULL; + int y,j=0, i; + float scanline_data[129], *scanline, *scanline2; + + STBTT__NOTUSED(vsubsample); + + if (result->w > 64) + scanline = (float *) STBTT_malloc((result->w*2+1) * sizeof(float), userdata); + else + scanline = scanline_data; + + scanline2 = scanline + result->w; + + y = off_y; + e[n].y0 = (float) (off_y + result->h) + 1; + + while (j < result->h) { + // find center of pixel for this scanline + float scan_y_top = y + 0.0f; + float scan_y_bottom = y + 1.0f; + stbtt__active_edge **step = &active; + + STBTT_memset(scanline , 0, result->w*sizeof(scanline[0])); + STBTT_memset(scanline2, 0, (result->w+1)*sizeof(scanline[0])); + + // update all active edges; + // remove all active edges that terminate before the top of this scanline + while (*step) { + stbtt__active_edge * z = *step; + if (z->ey <= scan_y_top) { + *step = z->next; // delete from list + STBTT_assert(z->direction); + z->direction = 0; + stbtt__hheap_free(&hh, z); + } else { + step = &((*step)->next); // advance through list + } + } + + // insert all edges that start before the bottom of this scanline + while (e->y0 <= scan_y_bottom) { + if (e->y0 != e->y1) { + stbtt__active_edge *z = stbtt__new_active(&hh, e, off_x, scan_y_top, userdata); + if (z != NULL) { + if (j == 0 && off_y != 0) { + if (z->ey < scan_y_top) { + // this can happen due to subpixel positioning and some kind of fp rounding error i think + z->ey = scan_y_top; + } + } + STBTT_assert(z->ey >= scan_y_top); // if we get really unlucky a tiny bit of an edge can be out of bounds + // insert at front + z->next = 
active; + active = z; + } + } + ++e; + } + + // now process all active edges + if (active) + stbtt__fill_active_edges_new(scanline, scanline2+1, result->w, active, scan_y_top); + + { + float sum = 0; + for (i=0; i < result->w; ++i) { + float k; + int m; + sum += scanline2[i]; + k = scanline[i] + sum; + k = (float) STBTT_fabs(k)*255 + 0.5f; + m = (int) k; + if (m > 255) m = 255; + result->pixels[j*result->stride + i] = (unsigned char) m; + } + } + // advance all the edges + step = &active; + while (*step) { + stbtt__active_edge *z = *step; + z->fx += z->fdx; // advance to position for current scanline + step = &((*step)->next); // advance through list + } + + ++y; + ++j; + } + + stbtt__hheap_cleanup(&hh, userdata); + + if (scanline != scanline_data) + STBTT_free(scanline, userdata); +} +#else +#error "Unrecognized value of STBTT_RASTERIZER_VERSION" +#endif + +#define STBTT__COMPARE(a,b) ((a)->y0 < (b)->y0) + +static void stbtt__sort_edges_ins_sort(stbtt__edge *p, int n) +{ + int i,j; + for (i=1; i < n; ++i) { + stbtt__edge t = p[i], *a = &t; + j = i; + while (j > 0) { + stbtt__edge *b = &p[j-1]; + int c = STBTT__COMPARE(a,b); + if (!c) break; + p[j] = p[j-1]; + --j; + } + if (i != j) + p[j] = t; + } +} + +static void stbtt__sort_edges_quicksort(stbtt__edge *p, int n) +{ + /* threshold for transitioning to insertion sort */ + while (n > 12) { + stbtt__edge t; + int c01,c12,c,m,i,j; + + /* compute median of three */ + m = n >> 1; + c01 = STBTT__COMPARE(&p[0],&p[m]); + c12 = STBTT__COMPARE(&p[m],&p[n-1]); + /* if 0 >= mid >= end, or 0 < mid < end, then use mid */ + if (c01 != c12) { + /* otherwise, we'll need to swap something else to middle */ + int z; + c = STBTT__COMPARE(&p[0],&p[n-1]); + /* 0>mid && midn => n; 0 0 */ + /* 0n: 0>n => 0; 0 n */ + z = (c == c12) ? 0 : n-1; + t = p[z]; + p[z] = p[m]; + p[m] = t; + } + /* now p[m] is the median-of-three */ + /* swap it to the beginning so it won't move around */ + t = p[0]; + p[0] = p[m]; + p[m] = t; + + /* partition loop */ + i=1; + j=n-1; + for(;;) { + /* handling of equality is crucial here */ + /* for sentinels & efficiency with duplicates */ + for (;;++i) { + if (!STBTT__COMPARE(&p[i], &p[0])) break; + } + for (;;--j) { + if (!STBTT__COMPARE(&p[0], &p[j])) break; + } + /* make sure we haven't crossed */ + if (i >= j) break; + t = p[i]; + p[i] = p[j]; + p[j] = t; + + ++i; + --j; + } + /* recurse on smaller side, iterate on larger */ + if (j < (n-i)) { + stbtt__sort_edges_quicksort(p,j); + p = p+i; + n = n-i; + } else { + stbtt__sort_edges_quicksort(p+i, n-i); + n = j; + } + } +} + +static void stbtt__sort_edges(stbtt__edge *p, int n) +{ + stbtt__sort_edges_quicksort(p, n); + stbtt__sort_edges_ins_sort(p, n); +} + +typedef struct +{ + float x,y; +} stbtt__point; + +static void stbtt__rasterize(stbtt__bitmap *result, stbtt__point *pts, int *wcount, int windings, float scale_x, float scale_y, float shift_x, float shift_y, int off_x, int off_y, int invert, void *userdata) +{ + float y_scale_inv = invert ? -scale_y : scale_y; + stbtt__edge *e; + int n,i,j,k,m; +#if STBTT_RASTERIZER_VERSION == 1 + int vsubsample = result->h < 8 ? 
15 : 5; +#elif STBTT_RASTERIZER_VERSION == 2 + int vsubsample = 1; +#else + #error "Unrecognized value of STBTT_RASTERIZER_VERSION" +#endif + // vsubsample should divide 255 evenly; otherwise we won't reach full opacity + + // now we have to blow out the windings into explicit edge lists + n = 0; + for (i=0; i < windings; ++i) + n += wcount[i]; + + e = (stbtt__edge *) STBTT_malloc(sizeof(*e) * (n+1), userdata); // add an extra one as a sentinel + if (e == 0) return; + n = 0; + + m=0; + for (i=0; i < windings; ++i) { + stbtt__point *p = pts + m; + m += wcount[i]; + j = wcount[i]-1; + for (k=0; k < wcount[i]; j=k++) { + int a=k,b=j; + // skip the edge if horizontal + if (p[j].y == p[k].y) + continue; + // add edge from j to k to the list + e[n].invert = 0; + if (invert ? p[j].y > p[k].y : p[j].y < p[k].y) { + e[n].invert = 1; + a=j,b=k; + } + e[n].x0 = p[a].x * scale_x + shift_x; + e[n].y0 = (p[a].y * y_scale_inv + shift_y) * vsubsample; + e[n].x1 = p[b].x * scale_x + shift_x; + e[n].y1 = (p[b].y * y_scale_inv + shift_y) * vsubsample; + ++n; + } + } + + // now sort the edges by their highest point (should snap to integer, and then by x) + //STBTT_sort(e, n, sizeof(e[0]), stbtt__edge_compare); + stbtt__sort_edges(e, n); + + // now, traverse the scanlines and find the intersections on each scanline, use xor winding rule + stbtt__rasterize_sorted_edges(result, e, n, vsubsample, off_x, off_y, userdata); + + STBTT_free(e, userdata); +} + +static void stbtt__add_point(stbtt__point *points, int n, float x, float y) +{ + if (!points) return; // during first pass, it's unallocated + points[n].x = x; + points[n].y = y; +} + +// tessellate until threshold p is happy... @TODO warped to compensate for non-linear stretching +static int stbtt__tesselate_curve(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float objspace_flatness_squared, int n) +{ + // midpoint + float mx = (x0 + 2*x1 + x2)/4; + float my = (y0 + 2*y1 + y2)/4; + // versus directly drawn line + float dx = (x0+x2)/2 - mx; + float dy = (y0+y2)/2 - my; + if (n > 16) // 65536 segments on one curve better be enough! + return 1; + if (dx*dx+dy*dy > objspace_flatness_squared) { // half-pixel error allowed... need to be smaller if AA + stbtt__tesselate_curve(points, num_points, x0,y0, (x0+x1)/2.0f,(y0+y1)/2.0f, mx,my, objspace_flatness_squared,n+1); + stbtt__tesselate_curve(points, num_points, mx,my, (x1+x2)/2.0f,(y1+y2)/2.0f, x2,y2, objspace_flatness_squared,n+1); + } else { + stbtt__add_point(points, *num_points,x2,y2); + *num_points = *num_points+1; + } + return 1; +} + +static void stbtt__tesselate_cubic(stbtt__point *points, int *num_points, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float objspace_flatness_squared, int n) +{ + // @TODO this "flatness" calculation is just made-up nonsense that seems to work well enough + float dx0 = x1-x0; + float dy0 = y1-y0; + float dx1 = x2-x1; + float dy1 = y2-y1; + float dx2 = x3-x2; + float dy2 = y3-y2; + float dx = x3-x0; + float dy = y3-y0; + float longlen = (float) (STBTT_sqrt(dx0*dx0+dy0*dy0)+STBTT_sqrt(dx1*dx1+dy1*dy1)+STBTT_sqrt(dx2*dx2+dy2*dy2)); + float shortlen = (float) STBTT_sqrt(dx*dx+dy*dy); + float flatness_squared = longlen*longlen-shortlen*shortlen; + + if (n > 16) // 65536 segments on one curve better be enough! 
+ return; + + if (flatness_squared > objspace_flatness_squared) { + float x01 = (x0+x1)/2; + float y01 = (y0+y1)/2; + float x12 = (x1+x2)/2; + float y12 = (y1+y2)/2; + float x23 = (x2+x3)/2; + float y23 = (y2+y3)/2; + + float xa = (x01+x12)/2; + float ya = (y01+y12)/2; + float xb = (x12+x23)/2; + float yb = (y12+y23)/2; + + float mx = (xa+xb)/2; + float my = (ya+yb)/2; + + stbtt__tesselate_cubic(points, num_points, x0,y0, x01,y01, xa,ya, mx,my, objspace_flatness_squared,n+1); + stbtt__tesselate_cubic(points, num_points, mx,my, xb,yb, x23,y23, x3,y3, objspace_flatness_squared,n+1); + } else { + stbtt__add_point(points, *num_points,x3,y3); + *num_points = *num_points+1; + } +} + +// returns number of contours +static stbtt__point *stbtt_FlattenCurves(stbtt_vertex *vertices, int num_verts, float objspace_flatness, int **contour_lengths, int *num_contours, void *userdata) +{ + stbtt__point *points=0; + int num_points=0; + + float objspace_flatness_squared = objspace_flatness * objspace_flatness; + int i,n=0,start=0, pass; + + // count how many "moves" there are to get the contour count + for (i=0; i < num_verts; ++i) + if (vertices[i].type == STBTT_vmove) + ++n; + + *num_contours = n; + if (n == 0) return 0; + + *contour_lengths = (int *) STBTT_malloc(sizeof(**contour_lengths) * n, userdata); + + if (*contour_lengths == 0) { + *num_contours = 0; + return 0; + } + + // make two passes through the points so we don't need to realloc + for (pass=0; pass < 2; ++pass) { + float x=0,y=0; + if (pass == 1) { + points = (stbtt__point *) STBTT_malloc(num_points * sizeof(points[0]), userdata); + if (points == NULL) goto error; + } + num_points = 0; + n= -1; + for (i=0; i < num_verts; ++i) { + switch (vertices[i].type) { + case STBTT_vmove: + // start the next contour + if (n >= 0) + (*contour_lengths)[n] = num_points - start; + ++n; + start = num_points; + + x = vertices[i].x, y = vertices[i].y; + stbtt__add_point(points, num_points++, x,y); + break; + case STBTT_vline: + x = vertices[i].x, y = vertices[i].y; + stbtt__add_point(points, num_points++, x, y); + break; + case STBTT_vcurve: + stbtt__tesselate_curve(points, &num_points, x,y, + vertices[i].cx, vertices[i].cy, + vertices[i].x, vertices[i].y, + objspace_flatness_squared, 0); + x = vertices[i].x, y = vertices[i].y; + break; + case STBTT_vcubic: + stbtt__tesselate_cubic(points, &num_points, x,y, + vertices[i].cx, vertices[i].cy, + vertices[i].cx1, vertices[i].cy1, + vertices[i].x, vertices[i].y, + objspace_flatness_squared, 0); + x = vertices[i].x, y = vertices[i].y; + break; + } + } + (*contour_lengths)[n] = num_points - start; + } + + return points; +error: + STBTT_free(points, userdata); + STBTT_free(*contour_lengths, userdata); + *contour_lengths = 0; + *num_contours = 0; + return NULL; +} + +STBTT_DEF void stbtt_Rasterize(stbtt__bitmap *result, float flatness_in_pixels, stbtt_vertex *vertices, int num_verts, float scale_x, float scale_y, float shift_x, float shift_y, int x_off, int y_off, int invert, void *userdata) +{ + float scale = scale_x > scale_y ? 
scale_y : scale_x; + int winding_count = 0; + int *winding_lengths = NULL; + stbtt__point *windings = stbtt_FlattenCurves(vertices, num_verts, flatness_in_pixels / scale, &winding_lengths, &winding_count, userdata); + if (windings) { + stbtt__rasterize(result, windings, winding_lengths, winding_count, scale_x, scale_y, shift_x, shift_y, x_off, y_off, invert, userdata); + STBTT_free(winding_lengths, userdata); + STBTT_free(windings, userdata); + } +} + +STBTT_DEF void stbtt_FreeBitmap(unsigned char *bitmap, void *userdata) +{ + STBTT_free(bitmap, userdata); +} + +STBTT_DEF unsigned char *stbtt_GetGlyphBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int glyph, int *width, int *height, int *xoff, int *yoff) +{ + int ix0,iy0,ix1,iy1; + stbtt__bitmap gbm; + stbtt_vertex *vertices; + int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); + + if (scale_x == 0) scale_x = scale_y; + if (scale_y == 0) { + if (scale_x == 0) { + STBTT_free(vertices, info->userdata); + return NULL; + } + scale_y = scale_x; + } + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0,&iy0,&ix1,&iy1); + + // now we get the size + gbm.w = (ix1 - ix0); + gbm.h = (iy1 - iy0); + gbm.pixels = NULL; // in case we error + + if (width ) *width = gbm.w; + if (height) *height = gbm.h; + if (xoff ) *xoff = ix0; + if (yoff ) *yoff = iy0; + + if (gbm.w && gbm.h) { + gbm.pixels = (unsigned char *) STBTT_malloc(gbm.w * gbm.h, info->userdata); + if (gbm.pixels) { + gbm.stride = gbm.w; + + stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0, iy0, 1, info->userdata); + } + } + STBTT_free(vertices, info->userdata); + return gbm.pixels; +} + +STBTT_DEF unsigned char *stbtt_GetGlyphBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int glyph, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y, 0.0f, 0.0f, glyph, width, height, xoff, yoff); +} + +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int glyph) +{ + int ix0,iy0; + stbtt_vertex *vertices; + int num_verts = stbtt_GetGlyphShape(info, glyph, &vertices); + stbtt__bitmap gbm; + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale_x, scale_y, shift_x, shift_y, &ix0,&iy0,0,0); + gbm.pixels = output; + gbm.w = out_w; + gbm.h = out_h; + gbm.stride = out_stride; + + if (gbm.w && gbm.h) + stbtt_Rasterize(&gbm, 0.35f, vertices, num_verts, scale_x, scale_y, shift_x, shift_y, ix0,iy0, 1, info->userdata); + + STBTT_free(vertices, info->userdata); +} + +STBTT_DEF void stbtt_MakeGlyphBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int glyph) +{ + stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f,0.0f, glyph); +} + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmapSubpixel(const stbtt_fontinfo *info, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphBitmapSubpixel(info, scale_x, scale_y,shift_x,shift_y, stbtt_FindGlyphIndex(info,codepoint), width,height,xoff,yoff); +} + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float 
scale_y, float shift_x, float shift_y, int oversample_x, int oversample_y, float *sub_x, float *sub_y, int codepoint) +{ + stbtt_MakeGlyphBitmapSubpixelPrefilter(info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, oversample_x, oversample_y, sub_x, sub_y, stbtt_FindGlyphIndex(info,codepoint)); +} + +STBTT_DEF void stbtt_MakeCodepointBitmapSubpixel(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int codepoint) +{ + stbtt_MakeGlyphBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, shift_x, shift_y, stbtt_FindGlyphIndex(info,codepoint)); +} + +STBTT_DEF unsigned char *stbtt_GetCodepointBitmap(const stbtt_fontinfo *info, float scale_x, float scale_y, int codepoint, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetCodepointBitmapSubpixel(info, scale_x, scale_y, 0.0f,0.0f, codepoint, width,height,xoff,yoff); +} + +STBTT_DEF void stbtt_MakeCodepointBitmap(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, int codepoint) +{ + stbtt_MakeCodepointBitmapSubpixel(info, output, out_w, out_h, out_stride, scale_x, scale_y, 0.0f,0.0f, codepoint); +} + +////////////////////////////////////////////////////////////////////////////// +// +// bitmap baking +// +// This is SUPER-CRAPPY packing to keep source code small + +static int stbtt_BakeFontBitmap_internal(unsigned char *data, int offset, // font location (use offset=0 for plain .ttf) + float pixel_height, // height of font in pixels + unsigned char *pixels, int pw, int ph, // bitmap to be filled in + int first_char, int num_chars, // characters to bake + stbtt_bakedchar *chardata) +{ + float scale; + int x,y,bottom_y, i; + stbtt_fontinfo f; + f.userdata = NULL; + if (!stbtt_InitFont(&f, data, offset)) + return -1; + STBTT_memset(pixels, 0, pw*ph); // background of 0 around pixels + x=y=1; + bottom_y = 1; + + scale = stbtt_ScaleForPixelHeight(&f, pixel_height); + + for (i=0; i < num_chars; ++i) { + int advance, lsb, x0,y0,x1,y1,gw,gh; + int g = stbtt_FindGlyphIndex(&f, first_char + i); + stbtt_GetGlyphHMetrics(&f, g, &advance, &lsb); + stbtt_GetGlyphBitmapBox(&f, g, scale,scale, &x0,&y0,&x1,&y1); + gw = x1-x0; + gh = y1-y0; + if (x + gw + 1 >= pw) + y = bottom_y, x = 1; // advance to next row + if (y + gh + 1 >= ph) // check if it fits vertically AFTER potentially moving to next row + return -i; + STBTT_assert(x+gw < pw); + STBTT_assert(y+gh < ph); + stbtt_MakeGlyphBitmap(&f, pixels+x+y*pw, gw,gh,pw, scale,scale, g); + chardata[i].x0 = (stbtt_int16) x; + chardata[i].y0 = (stbtt_int16) y; + chardata[i].x1 = (stbtt_int16) (x + gw); + chardata[i].y1 = (stbtt_int16) (y + gh); + chardata[i].xadvance = scale * advance; + chardata[i].xoff = (float) x0; + chardata[i].yoff = (float) y0; + x = x + gw + 1; + if (y+gh+1 > bottom_y) + bottom_y = y+gh+1; + } + return bottom_y; +} + +STBTT_DEF void stbtt_GetBakedQuad(const stbtt_bakedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int opengl_fillrule) +{ + float d3d_bias = opengl_fillrule ? 
0 : -0.5f; + float ipw = 1.0f / pw, iph = 1.0f / ph; + const stbtt_bakedchar *b = chardata + char_index; + int round_x = STBTT_ifloor((*xpos + b->xoff) + 0.5f); + int round_y = STBTT_ifloor((*ypos + b->yoff) + 0.5f); + + q->x0 = round_x + d3d_bias; + q->y0 = round_y + d3d_bias; + q->x1 = round_x + b->x1 - b->x0 + d3d_bias; + q->y1 = round_y + b->y1 - b->y0 + d3d_bias; + + q->s0 = b->x0 * ipw; + q->t0 = b->y0 * iph; + q->s1 = b->x1 * ipw; + q->t1 = b->y1 * iph; + + *xpos += b->xadvance; +} + +////////////////////////////////////////////////////////////////////////////// +// +// rectangle packing replacement routines if you don't have stb_rect_pack.h +// + +#ifndef STB_RECT_PACK_VERSION + +typedef int stbrp_coord; + +//////////////////////////////////////////////////////////////////////////////////// +// // +// // +// COMPILER WARNING ?!?!? // +// // +// // +// if you get a compile warning due to these symbols being defined more than // +// once, move #include "stb_rect_pack.h" before #include "stb_truetype.h" // +// // +//////////////////////////////////////////////////////////////////////////////////// + +typedef struct +{ + int width,height; + int x,y,bottom_y; +} stbrp_context; + +typedef struct +{ + unsigned char x; +} stbrp_node; + +struct stbrp_rect +{ + stbrp_coord x,y; + int id,w,h,was_packed; +}; + +static void stbrp_init_target(stbrp_context *con, int pw, int ph, stbrp_node *nodes, int num_nodes) +{ + con->width = pw; + con->height = ph; + con->x = 0; + con->y = 0; + con->bottom_y = 0; + STBTT__NOTUSED(nodes); + STBTT__NOTUSED(num_nodes); +} + +static void stbrp_pack_rects(stbrp_context *con, stbrp_rect *rects, int num_rects) +{ + int i; + for (i=0; i < num_rects; ++i) { + if (con->x + rects[i].w > con->width) { + con->x = 0; + con->y = con->bottom_y; + } + if (con->y + rects[i].h > con->height) + break; + rects[i].x = con->x; + rects[i].y = con->y; + rects[i].was_packed = 1; + con->x += rects[i].w; + if (con->y + rects[i].h > con->bottom_y) + con->bottom_y = con->y + rects[i].h; + } + for ( ; i < num_rects; ++i) + rects[i].was_packed = 0; +} +#endif + +////////////////////////////////////////////////////////////////////////////// +// +// bitmap baking +// +// This is SUPER-AWESOME (tm Ryan Gordon) packing using stb_rect_pack.h. If +// stb_rect_pack.h isn't available, it uses the BakeFontBitmap strategy. + +STBTT_DEF int stbtt_PackBegin(stbtt_pack_context *spc, unsigned char *pixels, int pw, int ph, int stride_in_bytes, int padding, void *alloc_context) +{ + stbrp_context *context = (stbrp_context *) STBTT_malloc(sizeof(*context) ,alloc_context); + int num_nodes = pw - padding; + stbrp_node *nodes = (stbrp_node *) STBTT_malloc(sizeof(*nodes ) * num_nodes,alloc_context); + + if (context == NULL || nodes == NULL) { + if (context != NULL) STBTT_free(context, alloc_context); + if (nodes != NULL) STBTT_free(nodes , alloc_context); + return 0; + } + + spc->user_allocator_context = alloc_context; + spc->width = pw; + spc->height = ph; + spc->pixels = pixels; + spc->pack_info = context; + spc->nodes = nodes; + spc->padding = padding; + spc->stride_in_bytes = stride_in_bytes != 0 ? 
stride_in_bytes : pw; + spc->h_oversample = 1; + spc->v_oversample = 1; + spc->skip_missing = 0; + + stbrp_init_target(context, pw-padding, ph-padding, nodes, num_nodes); + + if (pixels) + STBTT_memset(pixels, 0, pw*ph); // background of 0 around pixels + + return 1; +} + +STBTT_DEF void stbtt_PackEnd (stbtt_pack_context *spc) +{ + STBTT_free(spc->nodes , spc->user_allocator_context); + STBTT_free(spc->pack_info, spc->user_allocator_context); +} + +STBTT_DEF void stbtt_PackSetOversampling(stbtt_pack_context *spc, unsigned int h_oversample, unsigned int v_oversample) +{ + STBTT_assert(h_oversample <= STBTT_MAX_OVERSAMPLE); + STBTT_assert(v_oversample <= STBTT_MAX_OVERSAMPLE); + if (h_oversample <= STBTT_MAX_OVERSAMPLE) + spc->h_oversample = h_oversample; + if (v_oversample <= STBTT_MAX_OVERSAMPLE) + spc->v_oversample = v_oversample; +} + +STBTT_DEF void stbtt_PackSetSkipMissingCodepoints(stbtt_pack_context *spc, int skip) +{ + spc->skip_missing = skip; +} + +#define STBTT__OVER_MASK (STBTT_MAX_OVERSAMPLE-1) + +static void stbtt__h_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width) +{ + unsigned char buffer[STBTT_MAX_OVERSAMPLE]; + int safe_w = w - kernel_width; + int j; + STBTT_memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze + for (j=0; j < h; ++j) { + int i; + unsigned int total; + STBTT_memset(buffer, 0, kernel_width); + + total = 0; + + // make kernel_width a constant in common cases so compiler can optimize out the divide + switch (kernel_width) { + case 2: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 2); + } + break; + case 3: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 3); + } + break; + case 4: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 4); + } + break; + case 5: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / 5); + } + break; + default: + for (i=0; i <= safe_w; ++i) { + total += pixels[i] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i]; + pixels[i] = (unsigned char) (total / kernel_width); + } + break; + } + + for (; i < w; ++i) { + STBTT_assert(pixels[i] == 0); + total -= buffer[i & STBTT__OVER_MASK]; + pixels[i] = (unsigned char) (total / kernel_width); + } + + pixels += stride_in_bytes; + } +} + +static void stbtt__v_prefilter(unsigned char *pixels, int w, int h, int stride_in_bytes, unsigned int kernel_width) +{ + unsigned char buffer[STBTT_MAX_OVERSAMPLE]; + int safe_h = h - kernel_width; + int j; + STBTT_memset(buffer, 0, STBTT_MAX_OVERSAMPLE); // suppress bogus warning from VS2013 -analyze + for (j=0; j < w; ++j) { + int i; + unsigned int total; + STBTT_memset(buffer, 0, kernel_width); + + total = 0; + + // make kernel_width a constant in common cases so compiler can optimize out the divide + switch (kernel_width) { + case 2: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + 
pixels[i*stride_in_bytes] = (unsigned char) (total / 2); + } + break; + case 3: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 3); + } + break; + case 4: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 4); + } + break; + case 5: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / 5); + } + break; + default: + for (i=0; i <= safe_h; ++i) { + total += pixels[i*stride_in_bytes] - buffer[i & STBTT__OVER_MASK]; + buffer[(i+kernel_width) & STBTT__OVER_MASK] = pixels[i*stride_in_bytes]; + pixels[i*stride_in_bytes] = (unsigned char) (total / kernel_width); + } + break; + } + + for (; i < h; ++i) { + STBTT_assert(pixels[i*stride_in_bytes] == 0); + total -= buffer[i & STBTT__OVER_MASK]; + pixels[i*stride_in_bytes] = (unsigned char) (total / kernel_width); + } + + pixels += 1; + } +} + +static float stbtt__oversample_shift(int oversample) +{ + if (!oversample) + return 0.0f; + + // The prefilter is a box filter of width "oversample", + // which shifts phase by (oversample - 1)/2 pixels in + // oversampled space. We want to shift in the opposite + // direction to counter this. + return (float)-(oversample - 1) / (2.0f * (float)oversample); +} + +// rects array must be big enough to accommodate all characters in the given ranges +STBTT_DEF int stbtt_PackFontRangesGatherRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects) +{ + int i,j,k; + int missing_glyph_added = 0; + + k=0; + for (i=0; i < num_ranges; ++i) { + float fh = ranges[i].font_size; + float scale = fh > 0 ? stbtt_ScaleForPixelHeight(info, fh) : stbtt_ScaleForMappingEmToPixels(info, -fh); + ranges[i].h_oversample = (unsigned char) spc->h_oversample; + ranges[i].v_oversample = (unsigned char) spc->v_oversample; + for (j=0; j < ranges[i].num_chars; ++j) { + int x0,y0,x1,y1; + int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? 
ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; + int glyph = stbtt_FindGlyphIndex(info, codepoint); + if (glyph == 0 && (spc->skip_missing || missing_glyph_added)) { + rects[k].w = rects[k].h = 0; + } else { + stbtt_GetGlyphBitmapBoxSubpixel(info,glyph, + scale * spc->h_oversample, + scale * spc->v_oversample, + 0,0, + &x0,&y0,&x1,&y1); + rects[k].w = (stbrp_coord) (x1-x0 + spc->padding + spc->h_oversample-1); + rects[k].h = (stbrp_coord) (y1-y0 + spc->padding + spc->v_oversample-1); + if (glyph == 0) + missing_glyph_added = 1; + } + ++k; + } + } + + return k; +} + +STBTT_DEF void stbtt_MakeGlyphBitmapSubpixelPrefilter(const stbtt_fontinfo *info, unsigned char *output, int out_w, int out_h, int out_stride, float scale_x, float scale_y, float shift_x, float shift_y, int prefilter_x, int prefilter_y, float *sub_x, float *sub_y, int glyph) +{ + stbtt_MakeGlyphBitmapSubpixel(info, + output, + out_w - (prefilter_x - 1), + out_h - (prefilter_y - 1), + out_stride, + scale_x, + scale_y, + shift_x, + shift_y, + glyph); + + if (prefilter_x > 1) + stbtt__h_prefilter(output, out_w, out_h, out_stride, prefilter_x); + + if (prefilter_y > 1) + stbtt__v_prefilter(output, out_w, out_h, out_stride, prefilter_y); + + *sub_x = stbtt__oversample_shift(prefilter_x); + *sub_y = stbtt__oversample_shift(prefilter_y); +} + +// rects array must be big enough to accommodate all characters in the given ranges +STBTT_DEF int stbtt_PackFontRangesRenderIntoRects(stbtt_pack_context *spc, const stbtt_fontinfo *info, stbtt_pack_range *ranges, int num_ranges, stbrp_rect *rects) +{ + int i,j,k, missing_glyph = -1, return_value = 1; + + // save current values + int old_h_over = spc->h_oversample; + int old_v_over = spc->v_oversample; + + k = 0; + for (i=0; i < num_ranges; ++i) { + float fh = ranges[i].font_size; + float scale = fh > 0 ? stbtt_ScaleForPixelHeight(info, fh) : stbtt_ScaleForMappingEmToPixels(info, -fh); + float recip_h,recip_v,sub_x,sub_y; + spc->h_oversample = ranges[i].h_oversample; + spc->v_oversample = ranges[i].v_oversample; + recip_h = 1.0f / spc->h_oversample; + recip_v = 1.0f / spc->v_oversample; + sub_x = stbtt__oversample_shift(spc->h_oversample); + sub_y = stbtt__oversample_shift(spc->v_oversample); + for (j=0; j < ranges[i].num_chars; ++j) { + stbrp_rect *r = &rects[k]; + if (r->was_packed && r->w != 0 && r->h != 0) { + stbtt_packedchar *bc = &ranges[i].chardata_for_range[j]; + int advance, lsb, x0,y0,x1,y1; + int codepoint = ranges[i].array_of_unicode_codepoints == NULL ? 
ranges[i].first_unicode_codepoint_in_range + j : ranges[i].array_of_unicode_codepoints[j]; + int glyph = stbtt_FindGlyphIndex(info, codepoint); + stbrp_coord pad = (stbrp_coord) spc->padding; + + // pad on left and top + r->x += pad; + r->y += pad; + r->w -= pad; + r->h -= pad; + stbtt_GetGlyphHMetrics(info, glyph, &advance, &lsb); + stbtt_GetGlyphBitmapBox(info, glyph, + scale * spc->h_oversample, + scale * spc->v_oversample, + &x0,&y0,&x1,&y1); + stbtt_MakeGlyphBitmapSubpixel(info, + spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w - spc->h_oversample+1, + r->h - spc->v_oversample+1, + spc->stride_in_bytes, + scale * spc->h_oversample, + scale * spc->v_oversample, + 0,0, + glyph); + + if (spc->h_oversample > 1) + stbtt__h_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w, r->h, spc->stride_in_bytes, + spc->h_oversample); + + if (spc->v_oversample > 1) + stbtt__v_prefilter(spc->pixels + r->x + r->y*spc->stride_in_bytes, + r->w, r->h, spc->stride_in_bytes, + spc->v_oversample); + + bc->x0 = (stbtt_int16) r->x; + bc->y0 = (stbtt_int16) r->y; + bc->x1 = (stbtt_int16) (r->x + r->w); + bc->y1 = (stbtt_int16) (r->y + r->h); + bc->xadvance = scale * advance; + bc->xoff = (float) x0 * recip_h + sub_x; + bc->yoff = (float) y0 * recip_v + sub_y; + bc->xoff2 = (x0 + r->w) * recip_h + sub_x; + bc->yoff2 = (y0 + r->h) * recip_v + sub_y; + + if (glyph == 0) + missing_glyph = j; + } else if (spc->skip_missing) { + return_value = 0; + } else if (r->was_packed && r->w == 0 && r->h == 0 && missing_glyph >= 0) { + ranges[i].chardata_for_range[j] = ranges[i].chardata_for_range[missing_glyph]; + } else { + return_value = 0; // if any fail, report failure + } + + ++k; + } + } + + // restore original values + spc->h_oversample = old_h_over; + spc->v_oversample = old_v_over; + + return return_value; +} + +STBTT_DEF void stbtt_PackFontRangesPackRects(stbtt_pack_context *spc, stbrp_rect *rects, int num_rects) +{ + stbrp_pack_rects((stbrp_context *) spc->pack_info, rects, num_rects); +} + +STBTT_DEF int stbtt_PackFontRanges(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, stbtt_pack_range *ranges, int num_ranges) +{ + stbtt_fontinfo info; + int i,j,n, return_value = 1; + //stbrp_context *context = (stbrp_context *) spc->pack_info; + stbrp_rect *rects; + + // flag all characters as NOT packed + for (i=0; i < num_ranges; ++i) + for (j=0; j < ranges[i].num_chars; ++j) + ranges[i].chardata_for_range[j].x0 = + ranges[i].chardata_for_range[j].y0 = + ranges[i].chardata_for_range[j].x1 = + ranges[i].chardata_for_range[j].y1 = 0; + + n = 0; + for (i=0; i < num_ranges; ++i) + n += ranges[i].num_chars; + + rects = (stbrp_rect *) STBTT_malloc(sizeof(*rects) * n, spc->user_allocator_context); + if (rects == NULL) + return 0; + + info.userdata = spc->user_allocator_context; + stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata,font_index)); + + n = stbtt_PackFontRangesGatherRects(spc, &info, ranges, num_ranges, rects); + + stbtt_PackFontRangesPackRects(spc, rects, n); + + return_value = stbtt_PackFontRangesRenderIntoRects(spc, &info, ranges, num_ranges, rects); + + STBTT_free(rects, spc->user_allocator_context); + return return_value; +} + +STBTT_DEF int stbtt_PackFontRange(stbtt_pack_context *spc, const unsigned char *fontdata, int font_index, float font_size, + int first_unicode_codepoint_in_range, int num_chars_in_range, stbtt_packedchar *chardata_for_range) +{ + stbtt_pack_range range; + range.first_unicode_codepoint_in_range = first_unicode_codepoint_in_range; + 
range.array_of_unicode_codepoints = NULL; + range.num_chars = num_chars_in_range; + range.chardata_for_range = chardata_for_range; + range.font_size = font_size; + return stbtt_PackFontRanges(spc, fontdata, font_index, &range, 1); +} + +STBTT_DEF void stbtt_GetScaledFontVMetrics(const unsigned char *fontdata, int index, float size, float *ascent, float *descent, float *lineGap) +{ + int i_ascent, i_descent, i_lineGap; + float scale; + stbtt_fontinfo info; + stbtt_InitFont(&info, fontdata, stbtt_GetFontOffsetForIndex(fontdata, index)); + scale = size > 0 ? stbtt_ScaleForPixelHeight(&info, size) : stbtt_ScaleForMappingEmToPixels(&info, -size); + stbtt_GetFontVMetrics(&info, &i_ascent, &i_descent, &i_lineGap); + *ascent = (float) i_ascent * scale; + *descent = (float) i_descent * scale; + *lineGap = (float) i_lineGap * scale; +} + +STBTT_DEF void stbtt_GetPackedQuad(const stbtt_packedchar *chardata, int pw, int ph, int char_index, float *xpos, float *ypos, stbtt_aligned_quad *q, int align_to_integer) +{ + float ipw = 1.0f / pw, iph = 1.0f / ph; + const stbtt_packedchar *b = chardata + char_index; + + if (align_to_integer) { + float x = (float) STBTT_ifloor((*xpos + b->xoff) + 0.5f); + float y = (float) STBTT_ifloor((*ypos + b->yoff) + 0.5f); + q->x0 = x; + q->y0 = y; + q->x1 = x + b->xoff2 - b->xoff; + q->y1 = y + b->yoff2 - b->yoff; + } else { + q->x0 = *xpos + b->xoff; + q->y0 = *ypos + b->yoff; + q->x1 = *xpos + b->xoff2; + q->y1 = *ypos + b->yoff2; + } + + q->s0 = b->x0 * ipw; + q->t0 = b->y0 * iph; + q->s1 = b->x1 * ipw; + q->t1 = b->y1 * iph; + + *xpos += b->xadvance; +} + +////////////////////////////////////////////////////////////////////////////// +// +// sdf computation +// + +#define STBTT_min(a,b) ((a) < (b) ? (a) : (b)) +#define STBTT_max(a,b) ((a) < (b) ? 
(b) : (a)) + +static int stbtt__ray_intersect_bezier(float orig[2], float ray[2], float q0[2], float q1[2], float q2[2], float hits[2][2]) +{ + float q0perp = q0[1]*ray[0] - q0[0]*ray[1]; + float q1perp = q1[1]*ray[0] - q1[0]*ray[1]; + float q2perp = q2[1]*ray[0] - q2[0]*ray[1]; + float roperp = orig[1]*ray[0] - orig[0]*ray[1]; + + float a = q0perp - 2*q1perp + q2perp; + float b = q1perp - q0perp; + float c = q0perp - roperp; + + float s0 = 0., s1 = 0.; + int num_s = 0; + + if (a != 0.0) { + float discr = b*b - a*c; + if (discr > 0.0) { + float rcpna = -1 / a; + float d = (float) STBTT_sqrt(discr); + s0 = (b+d) * rcpna; + s1 = (b-d) * rcpna; + if (s0 >= 0.0 && s0 <= 1.0) + num_s = 1; + if (d > 0.0 && s1 >= 0.0 && s1 <= 1.0) { + if (num_s == 0) s0 = s1; + ++num_s; + } + } + } else { + // 2*b*s + c = 0 + // s = -c / (2*b) + s0 = c / (-2 * b); + if (s0 >= 0.0 && s0 <= 1.0) + num_s = 1; + } + + if (num_s == 0) + return 0; + else { + float rcp_len2 = 1 / (ray[0]*ray[0] + ray[1]*ray[1]); + float rayn_x = ray[0] * rcp_len2, rayn_y = ray[1] * rcp_len2; + + float q0d = q0[0]*rayn_x + q0[1]*rayn_y; + float q1d = q1[0]*rayn_x + q1[1]*rayn_y; + float q2d = q2[0]*rayn_x + q2[1]*rayn_y; + float rod = orig[0]*rayn_x + orig[1]*rayn_y; + + float q10d = q1d - q0d; + float q20d = q2d - q0d; + float q0rd = q0d - rod; + + hits[0][0] = q0rd + s0*(2.0f - 2.0f*s0)*q10d + s0*s0*q20d; + hits[0][1] = a*s0+b; + + if (num_s > 1) { + hits[1][0] = q0rd + s1*(2.0f - 2.0f*s1)*q10d + s1*s1*q20d; + hits[1][1] = a*s1+b; + return 2; + } else { + return 1; + } + } +} + +static int equal(float *a, float *b) +{ + return (a[0] == b[0] && a[1] == b[1]); +} + +static int stbtt__compute_crossings_x(float x, float y, int nverts, stbtt_vertex *verts) +{ + int i; + float orig[2], ray[2] = { 1, 0 }; + float y_frac; + int winding = 0; + + orig[0] = x; + orig[1] = y; + + // make sure y never passes through a vertex of the shape + y_frac = (float) STBTT_fmod(y, 1.0f); + if (y_frac < 0.01f) + y += 0.01f; + else if (y_frac > 0.99f) + y -= 0.01f; + orig[1] = y; + + // test a ray from (-infinity,y) to (x,y) + for (i=0; i < nverts; ++i) { + if (verts[i].type == STBTT_vline) { + int x0 = (int) verts[i-1].x, y0 = (int) verts[i-1].y; + int x1 = (int) verts[i ].x, y1 = (int) verts[i ].y; + if (y > STBTT_min(y0,y1) && y < STBTT_max(y0,y1) && x > STBTT_min(x0,x1)) { + float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0; + if (x_inter < x) + winding += (y0 < y1) ? 1 : -1; + } + } + if (verts[i].type == STBTT_vcurve) { + int x0 = (int) verts[i-1].x , y0 = (int) verts[i-1].y ; + int x1 = (int) verts[i ].cx, y1 = (int) verts[i ].cy; + int x2 = (int) verts[i ].x , y2 = (int) verts[i ].y ; + int ax = STBTT_min(x0,STBTT_min(x1,x2)), ay = STBTT_min(y0,STBTT_min(y1,y2)); + int by = STBTT_max(y0,STBTT_max(y1,y2)); + if (y > ay && y < by && x > ax) { + float q0[2],q1[2],q2[2]; + float hits[2][2]; + q0[0] = (float)x0; + q0[1] = (float)y0; + q1[0] = (float)x1; + q1[1] = (float)y1; + q2[0] = (float)x2; + q2[1] = (float)y2; + if (equal(q0,q1) || equal(q1,q2)) { + x0 = (int)verts[i-1].x; + y0 = (int)verts[i-1].y; + x1 = (int)verts[i ].x; + y1 = (int)verts[i ].y; + if (y > STBTT_min(y0,y1) && y < STBTT_max(y0,y1) && x > STBTT_min(x0,x1)) { + float x_inter = (y - y0) / (y1 - y0) * (x1-x0) + x0; + if (x_inter < x) + winding += (y0 < y1) ? 1 : -1; + } + } else { + int num_hits = stbtt__ray_intersect_bezier(orig, ray, q0, q1, q2, hits); + if (num_hits >= 1) + if (hits[0][0] < 0) + winding += (hits[0][1] < 0 ? 
-1 : 1); + if (num_hits >= 2) + if (hits[1][0] < 0) + winding += (hits[1][1] < 0 ? -1 : 1); + } + } + } + } + return winding; +} + +static float stbtt__cuberoot( float x ) +{ + if (x<0) + return -(float) STBTT_pow(-x,1.0f/3.0f); + else + return (float) STBTT_pow( x,1.0f/3.0f); +} + +// x^3 + c*x^2 + b*x + a = 0 +static int stbtt__solve_cubic(float a, float b, float c, float* r) +{ + float s = -a / 3; + float p = b - a*a / 3; + float q = a * (2*a*a - 9*b) / 27 + c; + float p3 = p*p*p; + float d = q*q + 4*p3 / 27; + if (d >= 0) { + float z = (float) STBTT_sqrt(d); + float u = (-q + z) / 2; + float v = (-q - z) / 2; + u = stbtt__cuberoot(u); + v = stbtt__cuberoot(v); + r[0] = s + u + v; + return 1; + } else { + float u = (float) STBTT_sqrt(-p/3); + float v = (float) STBTT_acos(-STBTT_sqrt(-27/p3) * q / 2) / 3; // p3 must be negative, since d is negative + float m = (float) STBTT_cos(v); + float n = (float) STBTT_cos(v-3.141592/2)*1.732050808f; + r[0] = s + u * 2 * m; + r[1] = s - u * (m + n); + r[2] = s - u * (m - n); + + //STBTT_assert( STBTT_fabs(((r[0]+a)*r[0]+b)*r[0]+c) < 0.05f); // these asserts may not be safe at all scales, though they're in bezier t parameter units so maybe? + //STBTT_assert( STBTT_fabs(((r[1]+a)*r[1]+b)*r[1]+c) < 0.05f); + //STBTT_assert( STBTT_fabs(((r[2]+a)*r[2]+b)*r[2]+c) < 0.05f); + return 3; + } +} + +STBTT_DEF unsigned char * stbtt_GetGlyphSDF(const stbtt_fontinfo *info, float scale, int glyph, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) +{ + float scale_x = scale, scale_y = scale; + int ix0,iy0,ix1,iy1; + int w,h; + unsigned char *data; + + if (scale == 0) return NULL; + + stbtt_GetGlyphBitmapBoxSubpixel(info, glyph, scale, scale, 0.0f,0.0f, &ix0,&iy0,&ix1,&iy1); + + // if empty, return NULL + if (ix0 == ix1 || iy0 == iy1) + return NULL; + + ix0 -= padding; + iy0 -= padding; + ix1 += padding; + iy1 += padding; + + w = (ix1 - ix0); + h = (iy1 - iy0); + + if (width ) *width = w; + if (height) *height = h; + if (xoff ) *xoff = ix0; + if (yoff ) *yoff = iy0; + + // invert for y-downwards bitmaps + scale_y = -scale_y; + + { + int x,y,i,j; + float *precompute; + stbtt_vertex *verts; + int num_verts = stbtt_GetGlyphShape(info, glyph, &verts); + data = (unsigned char *) STBTT_malloc(w * h, info->userdata); + precompute = (float *) STBTT_malloc(num_verts * sizeof(float), info->userdata); + + for (i=0,j=num_verts-1; i < num_verts; j=i++) { + if (verts[i].type == STBTT_vline) { + float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; + float x1 = verts[j].x*scale_x, y1 = verts[j].y*scale_y; + float dist = (float) STBTT_sqrt((x1-x0)*(x1-x0) + (y1-y0)*(y1-y0)); + precompute[i] = (dist == 0) ? 
0.0f : 1.0f / dist; + } else if (verts[i].type == STBTT_vcurve) { + float x2 = verts[j].x *scale_x, y2 = verts[j].y *scale_y; + float x1 = verts[i].cx*scale_x, y1 = verts[i].cy*scale_y; + float x0 = verts[i].x *scale_x, y0 = verts[i].y *scale_y; + float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; + float len2 = bx*bx + by*by; + if (len2 != 0.0f) + precompute[i] = 1.0f / (bx*bx + by*by); + else + precompute[i] = 0.0f; + } else + precompute[i] = 0.0f; + } + + for (y=iy0; y < iy1; ++y) { + for (x=ix0; x < ix1; ++x) { + float val; + float min_dist = 999999.0f; + float sx = (float) x + 0.5f; + float sy = (float) y + 0.5f; + float x_gspace = (sx / scale_x); + float y_gspace = (sy / scale_y); + + int winding = stbtt__compute_crossings_x(x_gspace, y_gspace, num_verts, verts); // @OPTIMIZE: this could just be a rasterization, but needs to be line vs. non-tesselated curves so a new path + + for (i=0; i < num_verts; ++i) { + float x0 = verts[i].x*scale_x, y0 = verts[i].y*scale_y; + + // check against every point here rather than inside line/curve primitives -- @TODO: wrong if multiple 'moves' in a row produce a garbage point, and given culling, probably more efficient to do within line/curve + float dist2 = (x0-sx)*(x0-sx) + (y0-sy)*(y0-sy); + if (dist2 < min_dist*min_dist) + min_dist = (float) STBTT_sqrt(dist2); + + if (verts[i].type == STBTT_vline) { + float x1 = verts[i-1].x*scale_x, y1 = verts[i-1].y*scale_y; + + // coarse culling against bbox + //if (sx > STBTT_min(x0,x1)-min_dist && sx < STBTT_max(x0,x1)+min_dist && + // sy > STBTT_min(y0,y1)-min_dist && sy < STBTT_max(y0,y1)+min_dist) + float dist = (float) STBTT_fabs((x1-x0)*(y0-sy) - (y1-y0)*(x0-sx)) * precompute[i]; + STBTT_assert(i != 0); + if (dist < min_dist) { + // check position along line + // x' = x0 + t*(x1-x0), y' = y0 + t*(y1-y0) + // minimize (x'-sx)*(x'-sx)+(y'-sy)*(y'-sy) + float dx = x1-x0, dy = y1-y0; + float px = x0-sx, py = y0-sy; + // minimize (px+t*dx)^2 + (py+t*dy)^2 = px*px + 2*px*dx*t + t^2*dx*dx + py*py + 2*py*dy*t + t^2*dy*dy + // derivative: 2*px*dx + 2*py*dy + (2*dx*dx+2*dy*dy)*t, set to 0 and solve + float t = -(px*dx + py*dy) / (dx*dx + dy*dy); + if (t >= 0.0f && t <= 1.0f) + min_dist = dist; + } + } else if (verts[i].type == STBTT_vcurve) { + float x2 = verts[i-1].x *scale_x, y2 = verts[i-1].y *scale_y; + float x1 = verts[i ].cx*scale_x, y1 = verts[i ].cy*scale_y; + float box_x0 = STBTT_min(STBTT_min(x0,x1),x2); + float box_y0 = STBTT_min(STBTT_min(y0,y1),y2); + float box_x1 = STBTT_max(STBTT_max(x0,x1),x2); + float box_y1 = STBTT_max(STBTT_max(y0,y1),y2); + // coarse culling against bbox to avoid computing cubic unnecessarily + if (sx > box_x0-min_dist && sx < box_x1+min_dist && sy > box_y0-min_dist && sy < box_y1+min_dist) { + int num=0; + float ax = x1-x0, ay = y1-y0; + float bx = x0 - 2*x1 + x2, by = y0 - 2*y1 + y2; + float mx = x0 - sx, my = y0 - sy; + float res[3],px,py,t,it; + float a_inv = precompute[i]; + if (a_inv == 0.0) { // if a_inv is 0, it's 2nd degree so use quadratic formula + float a = 3*(ax*bx + ay*by); + float b = 2*(ax*ax + ay*ay) + (mx*bx+my*by); + float c = mx*ax+my*ay; + if (a == 0.0) { // if a is 0, it's linear + if (b != 0.0) { + res[num++] = -c/b; + } + } else { + float discriminant = b*b - 4*a*c; + if (discriminant < 0) + num = 0; + else { + float root = (float) STBTT_sqrt(discriminant); + res[0] = (-b - root)/(2*a); + res[1] = (-b + root)/(2*a); + num = 2; // don't bother distinguishing 1-solution case, as code below will still work + } + } + } else { + float b = 3*(ax*bx + ay*by) * 
a_inv; // could precompute this as it doesn't depend on sample point + float c = (2*(ax*ax + ay*ay) + (mx*bx+my*by)) * a_inv; + float d = (mx*ax+my*ay) * a_inv; + num = stbtt__solve_cubic(b, c, d, res); + } + if (num >= 1 && res[0] >= 0.0f && res[0] <= 1.0f) { + t = res[0], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + if (num >= 2 && res[1] >= 0.0f && res[1] <= 1.0f) { + t = res[1], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + if (num >= 3 && res[2] >= 0.0f && res[2] <= 1.0f) { + t = res[2], it = 1.0f - t; + px = it*it*x0 + 2*t*it*x1 + t*t*x2; + py = it*it*y0 + 2*t*it*y1 + t*t*y2; + dist2 = (px-sx)*(px-sx) + (py-sy)*(py-sy); + if (dist2 < min_dist * min_dist) + min_dist = (float) STBTT_sqrt(dist2); + } + } + } + } + if (winding == 0) + min_dist = -min_dist; // if outside the shape, value is negative + val = onedge_value + pixel_dist_scale * min_dist; + if (val < 0) + val = 0; + else if (val > 255) + val = 255; + data[(y-iy0)*w+(x-ix0)] = (unsigned char) val; + } + } + STBTT_free(precompute, info->userdata); + STBTT_free(verts, info->userdata); + } + return data; +} + +STBTT_DEF unsigned char * stbtt_GetCodepointSDF(const stbtt_fontinfo *info, float scale, int codepoint, int padding, unsigned char onedge_value, float pixel_dist_scale, int *width, int *height, int *xoff, int *yoff) +{ + return stbtt_GetGlyphSDF(info, scale, stbtt_FindGlyphIndex(info, codepoint), padding, onedge_value, pixel_dist_scale, width, height, xoff, yoff); +} + +STBTT_DEF void stbtt_FreeSDF(unsigned char *bitmap, void *userdata) +{ + STBTT_free(bitmap, userdata); +} + +////////////////////////////////////////////////////////////////////////////// +// +// font name matching -- recommended not to use this +// + +// check if a utf8 string contains a prefix which is the utf16 string; if so return length of matching utf8 string +static stbtt_int32 stbtt__CompareUTF8toUTF16_bigendian_prefix(stbtt_uint8 *s1, stbtt_int32 len1, stbtt_uint8 *s2, stbtt_int32 len2) +{ + stbtt_int32 i=0; + + // convert utf16 to utf8 and compare the results while converting + while (len2) { + stbtt_uint16 ch = s2[0]*256 + s2[1]; + if (ch < 0x80) { + if (i >= len1) return -1; + if (s1[i++] != ch) return -1; + } else if (ch < 0x800) { + if (i+1 >= len1) return -1; + if (s1[i++] != 0xc0 + (ch >> 6)) return -1; + if (s1[i++] != 0x80 + (ch & 0x3f)) return -1; + } else if (ch >= 0xd800 && ch < 0xdc00) { + stbtt_uint32 c; + stbtt_uint16 ch2 = s2[2]*256 + s2[3]; + if (i+3 >= len1) return -1; + c = ((ch - 0xd800) << 10) + (ch2 - 0xdc00) + 0x10000; + if (s1[i++] != 0xf0 + (c >> 18)) return -1; + if (s1[i++] != 0x80 + ((c >> 12) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((c >> 6) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((c ) & 0x3f)) return -1; + s2 += 2; // plus another 2 below + len2 -= 2; + } else if (ch >= 0xdc00 && ch < 0xe000) { + return -1; + } else { + if (i+2 >= len1) return -1; + if (s1[i++] != 0xe0 + (ch >> 12)) return -1; + if (s1[i++] != 0x80 + ((ch >> 6) & 0x3f)) return -1; + if (s1[i++] != 0x80 + ((ch ) & 0x3f)) return -1; + } + s2 += 2; + len2 -= 2; + } + return i; +} + +static int stbtt_CompareUTF8toUTF16_bigendian_internal(char *s1, int len1, char *s2, int len2) +{ + return len1 == 
stbtt__CompareUTF8toUTF16_bigendian_prefix((stbtt_uint8*) s1, len1, (stbtt_uint8*) s2, len2); +} + +// returns results in whatever encoding you request... but note that 2-byte encodings +// will be BIG-ENDIAN... use stbtt_CompareUTF8toUTF16_bigendian() to compare +STBTT_DEF const char *stbtt_GetFontNameString(const stbtt_fontinfo *font, int *length, int platformID, int encodingID, int languageID, int nameID) +{ + stbtt_int32 i,count,stringOffset; + stbtt_uint8 *fc = font->data; + stbtt_uint32 offset = font->fontstart; + stbtt_uint32 nm = stbtt__find_table(fc, offset, "name"); + if (!nm) return NULL; + + count = ttUSHORT(fc+nm+2); + stringOffset = nm + ttUSHORT(fc+nm+4); + for (i=0; i < count; ++i) { + stbtt_uint32 loc = nm + 6 + 12 * i; + if (platformID == ttUSHORT(fc+loc+0) && encodingID == ttUSHORT(fc+loc+2) + && languageID == ttUSHORT(fc+loc+4) && nameID == ttUSHORT(fc+loc+6)) { + *length = ttUSHORT(fc+loc+8); + return (const char *) (fc+stringOffset+ttUSHORT(fc+loc+10)); + } + } + return NULL; +} + +static int stbtt__matchpair(stbtt_uint8 *fc, stbtt_uint32 nm, stbtt_uint8 *name, stbtt_int32 nlen, stbtt_int32 target_id, stbtt_int32 next_id) +{ + stbtt_int32 i; + stbtt_int32 count = ttUSHORT(fc+nm+2); + stbtt_int32 stringOffset = nm + ttUSHORT(fc+nm+4); + + for (i=0; i < count; ++i) { + stbtt_uint32 loc = nm + 6 + 12 * i; + stbtt_int32 id = ttUSHORT(fc+loc+6); + if (id == target_id) { + // find the encoding + stbtt_int32 platform = ttUSHORT(fc+loc+0), encoding = ttUSHORT(fc+loc+2), language = ttUSHORT(fc+loc+4); + + // is this a Unicode encoding? + if (platform == 0 || (platform == 3 && encoding == 1) || (platform == 3 && encoding == 10)) { + stbtt_int32 slen = ttUSHORT(fc+loc+8); + stbtt_int32 off = ttUSHORT(fc+loc+10); + + // check if there's a prefix match + stbtt_int32 matchlen = stbtt__CompareUTF8toUTF16_bigendian_prefix(name, nlen, fc+stringOffset+off,slen); + if (matchlen >= 0) { + // check for target_id+1 immediately following, with same encoding & language + if (i+1 < count && ttUSHORT(fc+loc+12+6) == next_id && ttUSHORT(fc+loc+12) == platform && ttUSHORT(fc+loc+12+2) == encoding && ttUSHORT(fc+loc+12+4) == language) { + slen = ttUSHORT(fc+loc+12+8); + off = ttUSHORT(fc+loc+12+10); + if (slen == 0) { + if (matchlen == nlen) + return 1; + } else if (matchlen < nlen && name[matchlen] == ' ') { + ++matchlen; + if (stbtt_CompareUTF8toUTF16_bigendian_internal((char*) (name+matchlen), nlen-matchlen, (char*)(fc+stringOffset+off),slen)) + return 1; + } + } else { + // if nothing immediately following + if (matchlen == nlen) + return 1; + } + } + } + + // @TODO handle other encodings + } + } + return 0; +} + +static int stbtt__matches(stbtt_uint8 *fc, stbtt_uint32 offset, stbtt_uint8 *name, stbtt_int32 flags) +{ + stbtt_int32 nlen = (stbtt_int32) STBTT_strlen((char *) name); + stbtt_uint32 nm,hd; + if (!stbtt__isfont(fc+offset)) return 0; + + // check italics/bold/underline flags in macStyle... 
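+   // macStyle lives at offset 44 in the 'head' table: bit 0 = bold, bit 1 = italic, bit 2 = underline,
+   // so the low 3 bits are compared against the requested flags below.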
+ if (flags) { + hd = stbtt__find_table(fc, offset, "head"); + if ((ttUSHORT(fc+hd+44) & 7) != (flags & 7)) return 0; + } + + nm = stbtt__find_table(fc, offset, "name"); + if (!nm) return 0; + + if (flags) { + // if we checked the macStyle flags, then just check the family and ignore the subfamily + if (stbtt__matchpair(fc, nm, name, nlen, 16, -1)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 1, -1)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; + } else { + if (stbtt__matchpair(fc, nm, name, nlen, 16, 17)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 1, 2)) return 1; + if (stbtt__matchpair(fc, nm, name, nlen, 3, -1)) return 1; + } + + return 0; +} + +static int stbtt_FindMatchingFont_internal(unsigned char *font_collection, char *name_utf8, stbtt_int32 flags) +{ + stbtt_int32 i; + for (i=0;;++i) { + stbtt_int32 off = stbtt_GetFontOffsetForIndex(font_collection, i); + if (off < 0) return off; + if (stbtt__matches((stbtt_uint8 *) font_collection, off, (stbtt_uint8*) name_utf8, flags)) + return off; + } +} + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +STBTT_DEF int stbtt_BakeFontBitmap(const unsigned char *data, int offset, + float pixel_height, unsigned char *pixels, int pw, int ph, + int first_char, int num_chars, stbtt_bakedchar *chardata) +{ + return stbtt_BakeFontBitmap_internal((unsigned char *) data, offset, pixel_height, pixels, pw, ph, first_char, num_chars, chardata); +} + +STBTT_DEF int stbtt_GetFontOffsetForIndex(const unsigned char *data, int index) +{ + return stbtt_GetFontOffsetForIndex_internal((unsigned char *) data, index); +} + +STBTT_DEF int stbtt_GetNumberOfFonts(const unsigned char *data) +{ + return stbtt_GetNumberOfFonts_internal((unsigned char *) data); +} + +STBTT_DEF int stbtt_InitFont(stbtt_fontinfo *info, const unsigned char *data, int offset) +{ + return stbtt_InitFont_internal(info, (unsigned char *) data, offset); +} + +STBTT_DEF int stbtt_FindMatchingFont(const unsigned char *fontdata, const char *name, int flags) +{ + return stbtt_FindMatchingFont_internal((unsigned char *) fontdata, (char *) name, flags); +} + +STBTT_DEF int stbtt_CompareUTF8toUTF16_bigendian(const char *s1, int len1, const char *s2, int len2) +{ + return stbtt_CompareUTF8toUTF16_bigendian_internal((char *) s1, len1, (char *) s2, len2); +} + +#if defined(__GNUC__) || defined(__clang__) +#pragma GCC diagnostic pop +#endif + +#endif // STB_TRUETYPE_IMPLEMENTATION + + +// FULL VERSION HISTORY +// +// 1.19 (2018-02-11) OpenType GPOS kerning (horizontal only), STBTT_fmod +// 1.18 (2018-01-29) add missing function +// 1.17 (2017-07-23) make more arguments const; doc fix +// 1.16 (2017-07-12) SDF support +// 1.15 (2017-03-03) make more arguments const +// 1.14 (2017-01-16) num-fonts-in-TTC function +// 1.13 (2017-01-02) support OpenType fonts, certain Apple fonts +// 1.12 (2016-10-25) suppress warnings about casting away const with -Wcast-qual +// 1.11 (2016-04-02) fix unused-variable warning +// 1.10 (2016-04-02) allow user-defined fabs() replacement +// fix memory leak if fontsize=0.0 +// fix warning from duplicate typedef +// 1.09 (2016-01-16) warning fix; avoid crash on outofmem; use alloc userdata for PackFontRanges +// 1.08 (2015-09-13) document stbtt_Rasterize(); fixes for vertical & horizontal edges +// 1.07 (2015-08-01) allow PackFontRanges to accept arrays of sparse codepoints; +// allow PackFontRanges to pack and render in separate phases; +// fix 
stbtt_GetFontOFfsetForIndex (never worked for non-0 input?); +// fixed an assert() bug in the new rasterizer +// replace assert() with STBTT_assert() in new rasterizer +// 1.06 (2015-07-14) performance improvements (~35% faster on x86 and x64 on test machine) +// also more precise AA rasterizer, except if shapes overlap +// remove need for STBTT_sort +// 1.05 (2015-04-15) fix misplaced definitions for STBTT_STATIC +// 1.04 (2015-04-15) typo in example +// 1.03 (2015-04-12) STBTT_STATIC, fix memory leak in new packing, various fixes +// 1.02 (2014-12-10) fix various warnings & compile issues w/ stb_rect_pack, C++ +// 1.01 (2014-12-08) fix subpixel position when oversampling to exactly match +// non-oversampled; STBTT_POINT_SIZE for packed case only +// 1.00 (2014-12-06) add new PackBegin etc. API, w/ support for oversampling +// 0.99 (2014-09-18) fix multiple bugs with subpixel rendering (ryg) +// 0.9 (2014-08-07) support certain mac/iOS fonts without an MS platformID +// 0.8b (2014-07-07) fix a warning +// 0.8 (2014-05-25) fix a few more warnings +// 0.7 (2013-09-25) bugfix: subpixel glyph bug fixed in 0.5 had come back +// 0.6c (2012-07-24) improve documentation +// 0.6b (2012-07-20) fix a few more warnings +// 0.6 (2012-07-17) fix warnings; added stbtt_ScaleForMappingEmToPixels, +// stbtt_GetFontBoundingBox, stbtt_IsGlyphEmpty +// 0.5 (2011-12-09) bugfixes: +// subpixel glyph renderer computed wrong bounding box +// first vertex of shape can be off-curve (FreeSans) +// 0.4b (2011-12-03) fixed an error in the font baking example +// 0.4 (2011-12-01) kerning, subpixel rendering (tor) +// bugfixes for: +// codepoint-to-glyph conversion using table fmt=12 +// codepoint-to-glyph conversion using table fmt=4 +// stbtt_GetBakedQuad with non-square texture (Zer) +// updated Hello World! sample to use kerning and subpixel +// fixed some warnings +// 0.3 (2009-06-24) cmap fmt=12, compound shapes (MM) +// userdata, malloc-from-userdata, non-zero fill (stb) +// 0.2 (2009-03-11) Fix unsigned/signed char warnings +// 0.1 (2009-03-09) First public release +// + +/* +------------------------------------------------------------------------------ +This software is available under 2 licenses -- choose whichever you prefer. +------------------------------------------------------------------------------ +ALTERNATIVE A - MIT License +Copyright (c) 2017 Sean Barrett +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+------------------------------------------------------------------------------ +ALTERNATIVE B - Public Domain (www.unlicense.org) +This is free and unencumbered software released into the public domain. +Anyone is free to copy, modify, publish, use, compile, sell, or distribute this +software, either in source code form or as a compiled binary, for any purpose, +commercial or non-commercial, and by any means. +In jurisdictions that recognize copyright laws, the author or authors of this +software dedicate any and all copyright interest in the software to the public +domain. We make this dedication for the benefit of the public at large and to +the detriment of our heirs and successors. We intend this dedication to be an +overt act of relinquishment in perpetuity of all present and future rights to +this software under copyright law. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +------------------------------------------------------------------------------ +*/ \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/include/nanovg_dk.h b/troposphere/daybreak/nanovg/include/nanovg_dk.h new file mode 100644 index 000000000..a0669f5b4 --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg_dk.h @@ -0,0 +1,520 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "nanovg.h" +#include "nanovg/dk_renderer.hpp" + +#ifdef __cplusplus +extern "C" { +#endif + +static int dknvg__maxi(int a, int b) { return a > b ? 
a : b; } + +static const DKNVGtextureDescriptor* dknvg__findTexture(DKNVGcontext* dk, int id) { + return dk->renderer->GetTextureDescriptor(*dk, id); +} + +static int dknvg__renderCreate(void* uptr) +{ + DKNVGcontext *dk = (DKNVGcontext*)uptr; + return dk->renderer->Create(*dk); +} + +static int dknvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data) +{ + DKNVGcontext *dk = (DKNVGcontext*)uptr; + return dk->renderer->CreateTexture(*dk, type, w, h, imageFlags, data); +} + +static int dknvg__renderDeleteTexture(void* uptr, int image) { + DKNVGcontext *dk = (DKNVGcontext*)uptr; + return dk->renderer->DeleteTexture(*dk, image); +} + +static int dknvg__renderUpdateTexture(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data) { + DKNVGcontext *dk = (DKNVGcontext*)uptr; + return dk->renderer->UpdateTexture(*dk, image, x, y, w, h, data); +} + +static int dknvg__renderGetTextureSize(void* uptr, int image, int* w, int* h) { + DKNVGcontext *dk = (DKNVGcontext*)uptr; + return dk->renderer->GetTextureSize(*dk, image, w, h); +} + +static void dknvg__xformToMat3x4(float* m3, float* t) { + m3[0] = t[0]; + m3[1] = t[1]; + m3[2] = 0.0f; + m3[3] = 0.0f; + m3[4] = t[2]; + m3[5] = t[3]; + m3[6] = 0.0f; + m3[7] = 0.0f; + m3[8] = t[4]; + m3[9] = t[5]; + m3[10] = 1.0f; + m3[11] = 0.0f; +} + +static NVGcolor dknvg__premulColor(NVGcolor c) { + c.r *= c.a; + c.g *= c.a; + c.b *= c.a; + return c; +} + +static int dknvg__convertPaint(DKNVGcontext* dk, DKNVGfragUniforms* frag, NVGpaint* paint, + NVGscissor* scissor, float width, float fringe, float strokeThr) +{ + const DKNVGtextureDescriptor *tex = NULL; + float invxform[6]; + + memset(frag, 0, sizeof(*frag)); + + frag->innerCol = dknvg__premulColor(paint->innerColor); + frag->outerCol = dknvg__premulColor(paint->outerColor); + + if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) { + memset(frag->scissorMat, 0, sizeof(frag->scissorMat)); + frag->scissorExt[0] = 1.0f; + frag->scissorExt[1] = 1.0f; + frag->scissorScale[0] = 1.0f; + frag->scissorScale[1] = 1.0f; + } else { + nvgTransformInverse(invxform, scissor->xform); + dknvg__xformToMat3x4(frag->scissorMat, invxform); + frag->scissorExt[0] = scissor->extent[0]; + frag->scissorExt[1] = scissor->extent[1]; + frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe; + frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe; + } + + memcpy(frag->extent, paint->extent, sizeof(frag->extent)); + frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe; + frag->strokeThr = strokeThr; + + if (paint->image != 0) { + tex = dknvg__findTexture(dk, paint->image); + if (tex == NULL) return 0; + if ((tex->flags & NVG_IMAGE_FLIPY) != 0) { + float m1[6], m2[6]; + nvgTransformTranslate(m1, 0.0f, frag->extent[1] * 0.5f); + nvgTransformMultiply(m1, paint->xform); + nvgTransformScale(m2, 1.0f, -1.0f); + nvgTransformMultiply(m2, m1); + nvgTransformTranslate(m1, 0.0f, -frag->extent[1] * 0.5f); + nvgTransformMultiply(m1, m2); + nvgTransformInverse(invxform, m1); + } else { + nvgTransformInverse(invxform, paint->xform); + } + frag->type = NSVG_SHADER_FILLIMG; + + if (tex->type == NVG_TEXTURE_RGBA) + frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 
0 : 1; + else + frag->texType = 2; +// printf("frag->texType = %d\n", frag->texType); + } else { + frag->type = NSVG_SHADER_FILLGRAD; + frag->radius = paint->radius; + frag->feather = paint->feather; + nvgTransformInverse(invxform, paint->xform); + } + + dknvg__xformToMat3x4(frag->paintMat, invxform); + + return 1; +} + +static DKNVGfragUniforms* nvg__fragUniformPtr(DKNVGcontext* dk, int i); + +static void dknvg__renderViewport(void* uptr, float width, float height, float devicePixelRatio) +{ + NVG_NOTUSED(devicePixelRatio); + DKNVGcontext* dk = (DKNVGcontext*)uptr; + dk->view[0] = width; + dk->view[1] = height; +} + +static void dknvg__renderCancel(void* uptr) { + DKNVGcontext* dk = (DKNVGcontext*)uptr; + dk->nverts = 0; + dk->npaths = 0; + dk->ncalls = 0; + dk->nuniforms = 0; +} + +static int dknvg_convertBlendFuncFactor(int factor) { + switch (factor) { + case NVG_ZERO: + return DkBlendFactor_Zero; + case NVG_ONE: + return DkBlendFactor_One; + case NVG_SRC_COLOR: + return DkBlendFactor_SrcColor; + case NVG_ONE_MINUS_SRC_COLOR: + return DkBlendFactor_InvSrcColor; + case NVG_DST_COLOR: + return DkBlendFactor_DstColor; + case NVG_ONE_MINUS_DST_COLOR: + return DkBlendFactor_InvDstColor; + case NVG_SRC_ALPHA: + return DkBlendFactor_SrcAlpha; + case NVG_ONE_MINUS_SRC_ALPHA: + return DkBlendFactor_InvSrcAlpha; + case NVG_DST_ALPHA: + return DkBlendFactor_DstAlpha; + case NVG_ONE_MINUS_DST_ALPHA: + return DkBlendFactor_InvDstAlpha; + case NVG_SRC_ALPHA_SATURATE: + return DkBlendFactor_SrcAlphaSaturate; + default: + return -1; + } +} + +static DKNVGblend dknvg__blendCompositeOperation(NVGcompositeOperationState op) { + DKNVGblend blend; + blend.srcRGB = dknvg_convertBlendFuncFactor(op.srcRGB); + blend.dstRGB = dknvg_convertBlendFuncFactor(op.dstRGB); + blend.srcAlpha = dknvg_convertBlendFuncFactor(op.srcAlpha); + blend.dstAlpha = dknvg_convertBlendFuncFactor(op.dstAlpha); + + if (blend.srcRGB == -1 || blend.dstRGB == -1 || blend.srcAlpha == -1 || blend.dstAlpha == -1) { + blend.srcRGB = DkBlendFactor_One; + blend.dstRGB = DkBlendFactor_InvSrcAlpha; + blend.srcAlpha = DkBlendFactor_One; + blend.dstAlpha = DkBlendFactor_InvSrcAlpha; + } + return blend; +} + +static void dknvg__renderFlush(void* uptr) { + DKNVGcontext *dk = (DKNVGcontext*)uptr; + dk->renderer->Flush(*dk); +} + +static int dknvg__maxVertCount(const NVGpath* paths, int npaths) { + int i, count = 0; + for (i = 0; i < npaths; i++) { + count += paths[i].nfill; + count += paths[i].nstroke; + } + return count; +} + +static DKNVGcall* dknvg__allocCall(DKNVGcontext* dk) +{ + DKNVGcall* ret = NULL; + if (dk->ncalls+1 > dk->ccalls) { + DKNVGcall* calls; + int ccalls = dknvg__maxi(dk->ncalls+1, 128) + dk->ccalls/2; // 1.5x Overallocate + calls = (DKNVGcall*)realloc(dk->calls, sizeof(DKNVGcall) * ccalls); + if (calls == NULL) return NULL; + dk->calls = calls; + dk->ccalls = ccalls; + } + ret = &dk->calls[dk->ncalls++]; + memset(ret, 0, sizeof(DKNVGcall)); + return ret; +} + +static int dknvg__allocPaths(DKNVGcontext* dk, int n) +{ + int ret = 0; + if (dk->npaths+n > dk->cpaths) { + DKNVGpath* paths; + int cpaths = dknvg__maxi(dk->npaths + n, 128) + dk->cpaths/2; // 1.5x Overallocate + paths = (DKNVGpath*)realloc(dk->paths, sizeof(DKNVGpath) * cpaths); + if (paths == NULL) return -1; + dk->paths = paths; + dk->cpaths = cpaths; + } + ret = dk->npaths; + dk->npaths += n; + return ret; +} + +static int dknvg__allocVerts(DKNVGcontext* dk, int n) +{ + int ret = 0; + if (dk->nverts+n > dk->cverts) { + NVGvertex* verts; + int cverts = 
dknvg__maxi(dk->nverts + n, 4096) + dk->cverts/2; // 1.5x Overallocate + verts = (NVGvertex*)realloc(dk->verts, sizeof(NVGvertex) * cverts); + if (verts == NULL) return -1; + dk->verts = verts; + dk->cverts = cverts; + } + ret = dk->nverts; + dk->nverts += n; + return ret; +} + +static int dknvg__allocFragUniforms(DKNVGcontext* dk, int n) +{ + int ret = 0, structSize = dk->fragSize; + if (dk->nuniforms+n > dk->cuniforms) { + unsigned char* uniforms; + int cuniforms = dknvg__maxi(dk->nuniforms+n, 128) + dk->cuniforms/2; // 1.5x Overallocate + uniforms = (unsigned char*)realloc(dk->uniforms, structSize * cuniforms); + if (uniforms == NULL) return -1; + dk->uniforms = uniforms; + dk->cuniforms = cuniforms; + } + ret = dk->nuniforms * structSize; + dk->nuniforms += n; + return ret; +} + +static DKNVGfragUniforms* nvg__fragUniformPtr(DKNVGcontext* dk, int i) +{ + return (DKNVGfragUniforms*)&dk->uniforms[i]; +} + +static void dknvg__vset(NVGvertex* vtx, float x, float y, float u, float v) +{ + vtx->x = x; + vtx->y = y; + vtx->u = u; + vtx->v = v; +} + +static void dknvg__renderFill(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe, + const float* bounds, const NVGpath* paths, int npaths) +{ + DKNVGcontext* dk = (DKNVGcontext*)uptr; + DKNVGcall* call = dknvg__allocCall(dk); + NVGvertex* quad; + DKNVGfragUniforms* frag; + int i, maxverts, offset; + + if (call == NULL) return; + + call->type = DKNVG_FILL; + call->triangleCount = 4; + call->pathOffset = dknvg__allocPaths(dk, npaths); + if (call->pathOffset == -1) goto error; + call->pathCount = npaths; + call->image = paint->image; + call->blendFunc = dknvg__blendCompositeOperation(compositeOperation); + + if (npaths == 1 && paths[0].convex) + { + call->type = DKNVG_CONVEXFILL; + call->triangleCount = 0; // Bounding box fill quad not needed for convex fill + } + + // Allocate vertices for all the paths. 
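+   // call->triangleCount reserves 4 extra vertices for the bounding quad used by non-convex fills (0 for convex fills).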
+ maxverts = dknvg__maxVertCount(paths, npaths) + call->triangleCount; + offset = dknvg__allocVerts(dk, maxverts); + if (offset == -1) goto error; + + for (i = 0; i < npaths; i++) { + DKNVGpath* copy = &dk->paths[call->pathOffset + i]; + const NVGpath* path = &paths[i]; + memset(copy, 0, sizeof(DKNVGpath)); + if (path->nfill > 0) { + copy->fillOffset = offset; + copy->fillCount = path->nfill; + memcpy(&dk->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill); + offset += path->nfill; + } + if (path->nstroke > 0) { + copy->strokeOffset = offset; + copy->strokeCount = path->nstroke; + memcpy(&dk->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke); + offset += path->nstroke; + } + } + + // Setup uniforms for draw calls + if (call->type == DKNVG_FILL) { + // Quad + call->triangleOffset = offset; + quad = &dk->verts[call->triangleOffset]; + dknvg__vset(&quad[0], bounds[2], bounds[3], 0.5f, 1.0f); + dknvg__vset(&quad[1], bounds[2], bounds[1], 0.5f, 1.0f); + dknvg__vset(&quad[2], bounds[0], bounds[3], 0.5f, 1.0f); + dknvg__vset(&quad[3], bounds[0], bounds[1], 0.5f, 1.0f); + + call->uniformOffset = dknvg__allocFragUniforms(dk, 2); + if (call->uniformOffset == -1) goto error; + // Simple shader for stencil + frag = nvg__fragUniformPtr(dk, call->uniformOffset); + memset(frag, 0, sizeof(*frag)); + frag->strokeThr = -1.0f; + frag->type = NSVG_SHADER_SIMPLE; + // Fill shader + dknvg__convertPaint(dk, nvg__fragUniformPtr(dk, call->uniformOffset + dk->fragSize), paint, scissor, fringe, fringe, -1.0f); + } else { + call->uniformOffset = dknvg__allocFragUniforms(dk, 1); + if (call->uniformOffset == -1) goto error; + // Fill shader + dknvg__convertPaint(dk, nvg__fragUniformPtr(dk, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f); + } + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. + if (dk->ncalls > 0) dk->ncalls--; +} + +static void dknvg__renderStroke(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe, + float strokeWidth, const NVGpath* paths, int npaths) +{ + DKNVGcontext* dk = (DKNVGcontext*)uptr; + DKNVGcall* call = dknvg__allocCall(dk); + int i, maxverts, offset; + + if (call == NULL) { + return; + } + + call->type = DKNVG_STROKE; + call->pathOffset = dknvg__allocPaths(dk, npaths); + if (call->pathOffset == -1) goto error; + call->pathCount = npaths; + call->image = paint->image; + call->blendFunc = dknvg__blendCompositeOperation(compositeOperation); + + // Allocate vertices for all the paths. 
+ maxverts = dknvg__maxVertCount(paths, npaths); + offset = dknvg__allocVerts(dk, maxverts); + if (offset == -1) goto error; + + for (i = 0; i < npaths; i++) { + DKNVGpath* copy = &dk->paths[call->pathOffset + i]; + const NVGpath* path = &paths[i]; + memset(copy, 0, sizeof(DKNVGpath)); + if (path->nstroke) { + copy->strokeOffset = offset; + copy->strokeCount = path->nstroke; + memcpy(&dk->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke); + offset += path->nstroke; + } + } + + if (dk->flags & NVG_STENCIL_STROKES) { + // Fill shader + call->uniformOffset = dknvg__allocFragUniforms(dk, 2); + if (call->uniformOffset == -1) goto error; + + dknvg__convertPaint(dk, nvg__fragUniformPtr(dk, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f); + dknvg__convertPaint(dk, nvg__fragUniformPtr(dk, call->uniformOffset + dk->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f); + } else { + // Fill shader + call->uniformOffset = dknvg__allocFragUniforms(dk, 1); + if (call->uniformOffset == -1) goto error; + + dknvg__convertPaint(dk, nvg__fragUniformPtr(dk, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f); + } + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. + if (dk->ncalls > 0) dk->ncalls--; +} + +static void dknvg__renderTriangles(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, + const NVGvertex* verts, int nverts, float fringe) +{ + DKNVGcontext* dk = (DKNVGcontext*)uptr; + DKNVGcall* call = dknvg__allocCall(dk); + DKNVGfragUniforms* frag; + + if (call == NULL) return; + + call->type = DKNVG_TRIANGLES; + call->image = paint->image; + call->blendFunc = dknvg__blendCompositeOperation(compositeOperation); + + // Allocate vertices for all the paths. + call->triangleOffset = dknvg__allocVerts(dk, nverts); + if (call->triangleOffset == -1) goto error; + call->triangleCount = nverts; + + memcpy(&dk->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts); + + // Fill shader + call->uniformOffset = dknvg__allocFragUniforms(dk, 1); + if (call->uniformOffset == -1) goto error; + frag = nvg__fragUniformPtr(dk, call->uniformOffset); + dknvg__convertPaint(dk, frag, paint, scissor, 1.0f, fringe, -1.0f); + frag->type = NSVG_SHADER_IMG; + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. 
+ if (dk->ncalls > 0) dk->ncalls--; +} + +static void dknvg__renderDelete(void* uptr) { + DKNVGcontext* dk = (DKNVGcontext*)uptr; + if (dk == NULL) return; + + free(dk->paths); + free(dk->verts); + free(dk->uniforms); + free(dk->calls); + + free(dk); +} + +NVGcontext* nvgCreateDk(nvg::DkRenderer *renderer, int flags) { + NVGparams params; + NVGcontext* ctx = NULL; + DKNVGcontext* dk = (DKNVGcontext*)malloc(sizeof(DKNVGcontext)); + if (dk == NULL) goto error; + memset(dk, 0, sizeof(DKNVGcontext)); + + memset(¶ms, 0, sizeof(params)); + params.renderCreate = dknvg__renderCreate; + params.renderCreateTexture = dknvg__renderCreateTexture; + params.renderDeleteTexture = dknvg__renderDeleteTexture; + params.renderUpdateTexture = dknvg__renderUpdateTexture; + params.renderGetTextureSize = dknvg__renderGetTextureSize; + params.renderViewport = dknvg__renderViewport; + params.renderCancel = dknvg__renderCancel; + params.renderFlush = dknvg__renderFlush; + params.renderFill = dknvg__renderFill; + params.renderStroke = dknvg__renderStroke; + params.renderTriangles = dknvg__renderTriangles; + params.renderDelete = dknvg__renderDelete; + params.userPtr = dk; + params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0; + + dk->renderer = renderer; + dk->flags = flags; + + ctx = nvgCreateInternal(¶ms); + if (ctx == NULL) goto error; + + return ctx; + +error: + // 'dk' is freed by nvgDeleteInternal. + if (ctx != NULL) nvgDeleteInternal(ctx); + return NULL; +} + +void nvgDeleteDk(NVGcontext* ctx) +{ + nvgDeleteInternal(ctx); +} + +#ifdef __cplusplus +} +#endif \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/include/nanovg_gl.h b/troposphere/daybreak/nanovg/include/nanovg_gl.h new file mode 100644 index 000000000..69506e7bf --- /dev/null +++ b/troposphere/daybreak/nanovg/include/nanovg_gl.h @@ -0,0 +1,1672 @@ +// +// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. +// + +#ifndef NANOVG_GL_H +#define NANOVG_GL_H + +#ifdef USE_OPENGL + +#ifdef __cplusplus +extern "C" { +#endif + +// Create flags + +enum NVGcreateFlags { + // Flag indicating if geometry based anti-aliasing is used (may not be needed when using MSAA). + NVG_ANTIALIAS = 1<<0, + // Flag indicating if strokes should be drawn using stencil buffer. The rendering will be a little + // slower, but path overlaps (i.e. self-intersecting or sharp turns) will be drawn just once. + NVG_STENCIL_STROKES = 1<<1, + // Flag indicating that additional debug checks are done. 
+ NVG_DEBUG = 1<<2, +}; + +#if defined NANOVG_GL2_IMPLEMENTATION +# define NANOVG_GL2 1 +# define NANOVG_GL_IMPLEMENTATION 1 +#elif defined NANOVG_GL3_IMPLEMENTATION +# define NANOVG_GL3 1 +# define NANOVG_GL_IMPLEMENTATION 1 +# define NANOVG_GL_USE_UNIFORMBUFFER 1 +#elif defined NANOVG_GLES2_IMPLEMENTATION +# define NANOVG_GLES2 1 +# define NANOVG_GL_IMPLEMENTATION 1 +#elif defined NANOVG_GLES3_IMPLEMENTATION +# define NANOVG_GLES3 1 +# define NANOVG_GL_IMPLEMENTATION 1 +#endif + +#define NANOVG_GL_USE_STATE_FILTER (1) + +// Creates NanoVG contexts for different OpenGL (ES) versions. +// Flags should be combination of the create flags above. + +#if defined NANOVG_GL2 + +NVGcontext* nvgCreateGL2(int flags); +void nvgDeleteGL2(NVGcontext* ctx); + +int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGL2(NVGcontext* ctx, int image); + +#endif + +#if defined NANOVG_GL3 + +NVGcontext* nvgCreateGL3(int flags); +void nvgDeleteGL3(NVGcontext* ctx); + +int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGL3(NVGcontext* ctx, int image); + +#endif + +#if defined NANOVG_GLES2 + +NVGcontext* nvgCreateGLES2(int flags); +void nvgDeleteGLES2(NVGcontext* ctx); + +int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image); + +#endif + +#if defined NANOVG_GLES3 + +NVGcontext* nvgCreateGLES3(int flags); +void nvgDeleteGLES3(NVGcontext* ctx); + +int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int flags); +GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image); + +#endif + +// These are additional flags on top of NVGimageFlags. +enum NVGimageFlagsGL { + NVG_IMAGE_NODELETE = 1<<16, // Do not delete GL texture handle. 
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* NANOVG_GL_H */
+
+#ifdef NANOVG_GL_IMPLEMENTATION
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include "nanovg.h"
+
+enum GLNVGuniformLoc {
+    GLNVG_LOC_VIEWSIZE,
+    GLNVG_LOC_TEX,
+    GLNVG_LOC_FRAG,
+    GLNVG_MAX_LOCS
+};
+
+enum GLNVGshaderType {
+    NSVG_SHADER_FILLGRAD,
+    NSVG_SHADER_FILLIMG,
+    NSVG_SHADER_SIMPLE,
+    NSVG_SHADER_IMG
+};
+
+#if NANOVG_GL_USE_UNIFORMBUFFER
+enum GLNVGuniformBindings {
+    GLNVG_FRAG_BINDING = 0,
+};
+#endif
+
+struct GLNVGshader {
+    GLuint prog;
+    GLuint frag;
+    GLuint vert;
+    GLint loc[GLNVG_MAX_LOCS];
+};
+typedef struct GLNVGshader GLNVGshader;
+
+struct GLNVGtexture {
+    int id;
+    GLuint tex;
+    int width, height;
+    int type;
+    int flags;
+};
+typedef struct GLNVGtexture GLNVGtexture;
+
+struct GLNVGblend
+{
+    GLenum srcRGB;
+    GLenum dstRGB;
+    GLenum srcAlpha;
+    GLenum dstAlpha;
+};
+typedef struct GLNVGblend GLNVGblend;
+
+enum GLNVGcallType {
+    GLNVG_NONE = 0,
+    GLNVG_FILL,
+    GLNVG_CONVEXFILL,
+    GLNVG_STROKE,
+    GLNVG_TRIANGLES,
+};
+
+struct GLNVGcall {
+    int type;
+    int image;
+    int pathOffset;
+    int pathCount;
+    int triangleOffset;
+    int triangleCount;
+    int uniformOffset;
+    GLNVGblend blendFunc;
+};
+typedef struct GLNVGcall GLNVGcall;
+
+struct GLNVGpath {
+    int fillOffset;
+    int fillCount;
+    int strokeOffset;
+    int strokeCount;
+};
+typedef struct GLNVGpath GLNVGpath;
+
+struct GLNVGfragUniforms {
+    #if NANOVG_GL_USE_UNIFORMBUFFER
+    float scissorMat[12]; // matrices are actually 3 vec4s
+    float paintMat[12];
+    struct NVGcolor innerCol;
+    struct NVGcolor outerCol;
+    float scissorExt[2];
+    float scissorScale[2];
+    float extent[2];
+    float radius;
+    float feather;
+    float strokeMult;
+    float strokeThr;
+    int texType;
+    int type;
+    #else
+    // note: after modifying layout or size of uniform array,
+    // don't forget to also update the fragment shader source!
+    #define NANOVG_GL_UNIFORMARRAY_SIZE 11
+    union {
+        struct {
+            float scissorMat[12]; // matrices are actually 3 vec4s
+            float paintMat[12];
+            struct NVGcolor innerCol;
+            struct NVGcolor outerCol;
+            float scissorExt[2];
+            float scissorScale[2];
+            float extent[2];
+            float radius;
+            float feather;
+            float strokeMult;
+            float strokeThr;
+            float texType;
+            float type;
+        };
+        float uniformArray[NANOVG_GL_UNIFORMARRAY_SIZE][4];
+    };
+    #endif
+};
+typedef struct GLNVGfragUniforms GLNVGfragUniforms;
+
+struct GLNVGcontext {
+    GLNVGshader shader;
+    GLNVGtexture* textures;
+    float view[2];
+    int ntextures;
+    int ctextures;
+    int textureId;
+    GLuint vertBuf;
+#if defined NANOVG_GL3
+    GLuint vertArr;
+#endif
+#if NANOVG_GL_USE_UNIFORMBUFFER
+    GLuint fragBuf;
+#endif
+    int fragSize;
+    int flags;
+
+    // Per frame buffers
+    GLNVGcall* calls;
+    int ccalls;
+    int ncalls;
+    GLNVGpath* paths;
+    int cpaths;
+    int npaths;
+    struct NVGvertex* verts;
+    int cverts;
+    int nverts;
+    unsigned char* uniforms;
+    int cuniforms;
+    int nuniforms;
+
+    // cached state
+    #if NANOVG_GL_USE_STATE_FILTER
+    GLuint boundTexture;
+    GLuint stencilMask;
+    GLenum stencilFunc;
+    GLint stencilFuncRef;
+    GLuint stencilFuncMask;
+    GLNVGblend blendFunc;
+    #endif
+
+    int dummyTex;
+};
+typedef struct GLNVGcontext GLNVGcontext;
+
+static int glnvg__maxi(int a, int b) { return a > b ? a : b; }
+
+#ifdef NANOVG_GLES2
+static unsigned int glnvg__nearestPow2(unsigned int num)
+{
+    unsigned n = num > 0 ?
num - 1 : 0; + n |= n >> 1; + n |= n >> 2; + n |= n >> 4; + n |= n >> 8; + n |= n >> 16; + n++; + return n; +} +#endif + +static void glnvg__bindTexture(GLNVGcontext* gl, GLuint tex) +{ +#if NANOVG_GL_USE_STATE_FILTER + if (gl->boundTexture != tex) { + gl->boundTexture = tex; + glBindTexture(GL_TEXTURE_2D, tex); + } +#else + glBindTexture(GL_TEXTURE_2D, tex); +#endif +} + +static void glnvg__stencilMask(GLNVGcontext* gl, GLuint mask) +{ +#if NANOVG_GL_USE_STATE_FILTER + if (gl->stencilMask != mask) { + gl->stencilMask = mask; + glStencilMask(mask); + } +#else + glStencilMask(mask); +#endif +} + +static void glnvg__stencilFunc(GLNVGcontext* gl, GLenum func, GLint ref, GLuint mask) +{ +#if NANOVG_GL_USE_STATE_FILTER + if ((gl->stencilFunc != func) || + (gl->stencilFuncRef != ref) || + (gl->stencilFuncMask != mask)) { + + gl->stencilFunc = func; + gl->stencilFuncRef = ref; + gl->stencilFuncMask = mask; + glStencilFunc(func, ref, mask); + } +#else + glStencilFunc(func, ref, mask); +#endif +} +static void glnvg__blendFuncSeparate(GLNVGcontext* gl, const GLNVGblend* blend) +{ +#if NANOVG_GL_USE_STATE_FILTER + if ((gl->blendFunc.srcRGB != blend->srcRGB) || + (gl->blendFunc.dstRGB != blend->dstRGB) || + (gl->blendFunc.srcAlpha != blend->srcAlpha) || + (gl->blendFunc.dstAlpha != blend->dstAlpha)) { + + gl->blendFunc = *blend; + glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha); + } +#else + glBlendFuncSeparate(blend->srcRGB, blend->dstRGB, blend->srcAlpha,blend->dstAlpha); +#endif +} + +static GLNVGtexture* glnvg__allocTexture(GLNVGcontext* gl) +{ + GLNVGtexture* tex = NULL; + int i; + + for (i = 0; i < gl->ntextures; i++) { + if (gl->textures[i].id == 0) { + tex = &gl->textures[i]; + break; + } + } + if (tex == NULL) { + if (gl->ntextures+1 > gl->ctextures) { + GLNVGtexture* textures; + int ctextures = glnvg__maxi(gl->ntextures+1, 4) + gl->ctextures/2; // 1.5x Overallocate + textures = (GLNVGtexture*)realloc(gl->textures, sizeof(GLNVGtexture)*ctextures); + if (textures == NULL) return NULL; + gl->textures = textures; + gl->ctextures = ctextures; + } + tex = &gl->textures[gl->ntextures++]; + } + + memset(tex, 0, sizeof(*tex)); + tex->id = ++gl->textureId; + + return tex; +} + +static GLNVGtexture* glnvg__findTexture(GLNVGcontext* gl, int id) +{ + int i; + for (i = 0; i < gl->ntextures; i++) + if (gl->textures[i].id == id) + return &gl->textures[i]; + return NULL; +} + +static int glnvg__deleteTexture(GLNVGcontext* gl, int id) +{ + int i; + for (i = 0; i < gl->ntextures; i++) { + if (gl->textures[i].id == id) { + if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0) + glDeleteTextures(1, &gl->textures[i].tex); + memset(&gl->textures[i], 0, sizeof(gl->textures[i])); + return 1; + } + } + return 0; +} + +static void glnvg__dumpShaderError(GLuint shader, const char* name, const char* type) +{ + GLchar str[512+1]; + GLsizei len = 0; + glGetShaderInfoLog(shader, 512, &len, str); + if (len > 512) len = 512; + str[len] = '\0'; + printf("Shader %s/%s error:\n%s\n", name, type, str); +} + +static void glnvg__dumpProgramError(GLuint prog, const char* name) +{ + GLchar str[512+1]; + GLsizei len = 0; + glGetProgramInfoLog(prog, 512, &len, str); + if (len > 512) len = 512; + str[len] = '\0'; + printf("Program %s error:\n%s\n", name, str); +} + +static void glnvg__checkError(GLNVGcontext* gl, const char* str) +{ + GLenum err; + if ((gl->flags & NVG_DEBUG) == 0) return; + err = glGetError(); + if (err != GL_NO_ERROR) { + printf("Error %08x after %s\n", 
err, str); + return; + } +} + +static int glnvg__createShader(GLNVGshader* shader, const char* name, const char* header, const char* opts, const char* vshader, const char* fshader) +{ + GLint status; + GLuint prog, vert, frag; + const char* str[3]; + str[0] = header; + str[1] = opts != NULL ? opts : ""; + + memset(shader, 0, sizeof(*shader)); + + prog = glCreateProgram(); + vert = glCreateShader(GL_VERTEX_SHADER); + frag = glCreateShader(GL_FRAGMENT_SHADER); + str[2] = vshader; + glShaderSource(vert, 3, str, 0); + str[2] = fshader; + glShaderSource(frag, 3, str, 0); + + glCompileShader(vert); + glGetShaderiv(vert, GL_COMPILE_STATUS, &status); + if (status != GL_TRUE) { + glnvg__dumpShaderError(vert, name, "vert"); + return 0; + } + + glCompileShader(frag); + glGetShaderiv(frag, GL_COMPILE_STATUS, &status); + if (status != GL_TRUE) { + glnvg__dumpShaderError(frag, name, "frag"); + return 0; + } + + glAttachShader(prog, vert); + glAttachShader(prog, frag); + + glBindAttribLocation(prog, 0, "vertex"); + glBindAttribLocation(prog, 1, "tcoord"); + + glLinkProgram(prog); + glGetProgramiv(prog, GL_LINK_STATUS, &status); + if (status != GL_TRUE) { + glnvg__dumpProgramError(prog, name); + return 0; + } + + shader->prog = prog; + shader->vert = vert; + shader->frag = frag; + + return 1; +} + +static void glnvg__deleteShader(GLNVGshader* shader) +{ + if (shader->prog != 0) + glDeleteProgram(shader->prog); + if (shader->vert != 0) + glDeleteShader(shader->vert); + if (shader->frag != 0) + glDeleteShader(shader->frag); +} + +static void glnvg__getUniforms(GLNVGshader* shader) +{ + shader->loc[GLNVG_LOC_VIEWSIZE] = glGetUniformLocation(shader->prog, "viewSize"); + shader->loc[GLNVG_LOC_TEX] = glGetUniformLocation(shader->prog, "tex"); + +#if NANOVG_GL_USE_UNIFORMBUFFER + shader->loc[GLNVG_LOC_FRAG] = glGetUniformBlockIndex(shader->prog, "frag"); +#else + shader->loc[GLNVG_LOC_FRAG] = glGetUniformLocation(shader->prog, "frag"); +#endif +} + +static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data); + +static int glnvg__renderCreate(void* uptr) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + int align = 4; + + // TODO: mediump float may not be enough for GLES2 in iOS. 
+ // see the following discussion: https://github.com/memononen/nanovg/issues/46 + static const char* shaderHeader = +#if defined NANOVG_GL2 + "#define NANOVG_GL2 1\n" +#elif defined NANOVG_GL3 + "#version 150 core\n" + "#define NANOVG_GL3 1\n" +#elif defined NANOVG_GLES2 + "#version 100\n" + "#define NANOVG_GL2 1\n" +#elif defined NANOVG_GLES3 + "#version 300 es\n" + "#define NANOVG_GL3 1\n" +#endif + +#if NANOVG_GL_USE_UNIFORMBUFFER + "#define USE_UNIFORMBUFFER 1\n" +#else + "#define UNIFORMARRAY_SIZE 11\n" +#endif + "\n"; + + static const char* fillVertShader = + "#ifdef NANOVG_GL3\n" + " uniform vec2 viewSize;\n" + " in vec2 vertex;\n" + " in vec2 tcoord;\n" + " out vec2 ftcoord;\n" + " out vec2 fpos;\n" + "#else\n" + " uniform vec2 viewSize;\n" + " attribute vec2 vertex;\n" + " attribute vec2 tcoord;\n" + " varying vec2 ftcoord;\n" + " varying vec2 fpos;\n" + "#endif\n" + "void main(void) {\n" + " ftcoord = tcoord;\n" + " fpos = vertex;\n" + " gl_Position = vec4(2.0*vertex.x/viewSize.x - 1.0, 1.0 - 2.0*vertex.y/viewSize.y, 0, 1);\n" + "}\n"; + + static const char* fillFragShader = + "#ifdef GL_ES\n" + "#if defined(GL_FRAGMENT_PRECISION_HIGH) || defined(NANOVG_GL3)\n" + " precision highp float;\n" + "#else\n" + " precision mediump float;\n" + "#endif\n" + "#endif\n" + "#ifdef NANOVG_GL3\n" + "#ifdef USE_UNIFORMBUFFER\n" + " layout(std140) uniform frag {\n" + " mat3 scissorMat;\n" + " mat3 paintMat;\n" + " vec4 innerCol;\n" + " vec4 outerCol;\n" + " vec2 scissorExt;\n" + " vec2 scissorScale;\n" + " vec2 extent;\n" + " float radius;\n" + " float feather;\n" + " float strokeMult;\n" + " float strokeThr;\n" + " int texType;\n" + " int type;\n" + " };\n" + "#else\n" // NANOVG_GL3 && !USE_UNIFORMBUFFER + " uniform vec4 frag[UNIFORMARRAY_SIZE];\n" + "#endif\n" + " uniform sampler2D tex;\n" + " in vec2 ftcoord;\n" + " in vec2 fpos;\n" + " out vec4 outColor;\n" + "#else\n" // !NANOVG_GL3 + " uniform vec4 frag[UNIFORMARRAY_SIZE];\n" + " uniform sampler2D tex;\n" + " varying vec2 ftcoord;\n" + " varying vec2 fpos;\n" + "#endif\n" + "#ifndef USE_UNIFORMBUFFER\n" + " #define scissorMat mat3(frag[0].xyz, frag[1].xyz, frag[2].xyz)\n" + " #define paintMat mat3(frag[3].xyz, frag[4].xyz, frag[5].xyz)\n" + " #define innerCol frag[6]\n" + " #define outerCol frag[7]\n" + " #define scissorExt frag[8].xy\n" + " #define scissorScale frag[8].zw\n" + " #define extent frag[9].xy\n" + " #define radius frag[9].z\n" + " #define feather frag[9].w\n" + " #define strokeMult frag[10].x\n" + " #define strokeThr frag[10].y\n" + " #define texType int(frag[10].z)\n" + " #define type int(frag[10].w)\n" + "#endif\n" + "\n" + "float sdroundrect(vec2 pt, vec2 ext, float rad) {\n" + " vec2 ext2 = ext - vec2(rad,rad);\n" + " vec2 d = abs(pt) - ext2;\n" + " return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad;\n" + "}\n" + "\n" + "// Scissoring\n" + "float scissorMask(vec2 p) {\n" + " vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt);\n" + " sc = vec2(0.5,0.5) - sc * scissorScale;\n" + " return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0);\n" + "}\n" + "#ifdef EDGE_AA\n" + "// Stroke - from [0..1] to clipped pyramid, where the slope is 1px.\n" + "float strokeMask() {\n" + " return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y);\n" + "}\n" + "#endif\n" + "\n" + "void main(void) {\n" + " vec4 result;\n" + " float scissor = scissorMask(fpos);\n" + "#ifdef EDGE_AA\n" + " float strokeAlpha = strokeMask();\n" + " if (strokeAlpha < strokeThr) discard;\n" + "#else\n" + " float strokeAlpha = 1.0;\n" + 
"#endif\n" + " if (type == 0) { // Gradient\n" + " // Calculate gradient color using box gradient\n" + " vec2 pt = (paintMat * vec3(fpos,1.0)).xy;\n" + " float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0);\n" + " vec4 color = mix(innerCol,outerCol,d);\n" + " // Combine alpha\n" + " color *= strokeAlpha * scissor;\n" + " result = color;\n" + " } else if (type == 1) { // Image\n" + " // Calculate color fron texture\n" + " vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent;\n" + "#ifdef NANOVG_GL3\n" + " vec4 color = texture(tex, pt);\n" + "#else\n" + " vec4 color = texture2D(tex, pt);\n" + "#endif\n" + " if (texType == 1) color = vec4(color.xyz*color.w,color.w);" + " if (texType == 2) color = vec4(color.x);" + " // Apply color tint and alpha.\n" + " color *= innerCol;\n" + " // Combine alpha\n" + " color *= strokeAlpha * scissor;\n" + " result = color;\n" + " } else if (type == 2) { // Stencil fill\n" + " result = vec4(1,1,1,1);\n" + " } else if (type == 3) { // Textured tris\n" + "#ifdef NANOVG_GL3\n" + " vec4 color = texture(tex, ftcoord);\n" + "#else\n" + " vec4 color = texture2D(tex, ftcoord);\n" + "#endif\n" + " if (texType == 1) color = vec4(color.xyz*color.w,color.w);" + " if (texType == 2) color = vec4(color.x);" + " color *= scissor;\n" + " result = color * innerCol;\n" + " }\n" + "#ifdef NANOVG_GL3\n" + " outColor = result;\n" + "#else\n" + " gl_FragColor = result;\n" + "#endif\n" + "}\n"; + + glnvg__checkError(gl, "init"); + + if (gl->flags & NVG_ANTIALIAS) { + if (glnvg__createShader(&gl->shader, "shader", shaderHeader, "#define EDGE_AA 1\n", fillVertShader, fillFragShader) == 0) + return 0; + } else { + if (glnvg__createShader(&gl->shader, "shader", shaderHeader, NULL, fillVertShader, fillFragShader) == 0) + return 0; + } + + glnvg__checkError(gl, "uniform locations"); + glnvg__getUniforms(&gl->shader); + + // Create dynamic vertex array +#if defined NANOVG_GL3 + glGenVertexArrays(1, &gl->vertArr); +#endif + glGenBuffers(1, &gl->vertBuf); + +#if NANOVG_GL_USE_UNIFORMBUFFER + // Create UBOs + glUniformBlockBinding(gl->shader.prog, gl->shader.loc[GLNVG_LOC_FRAG], GLNVG_FRAG_BINDING); + glGenBuffers(1, &gl->fragBuf); + glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &align); +#endif + gl->fragSize = sizeof(GLNVGfragUniforms) + align - sizeof(GLNVGfragUniforms) % align; + + // Some platforms does not allow to have samples to unset textures. + // Create empty one which is bound when there's no texture specified. + gl->dummyTex = glnvg__renderCreateTexture(gl, NVG_TEXTURE_ALPHA, 1, 1, 0, NULL); + + glnvg__checkError(gl, "create done"); + + glFinish(); + + return 1; +} + +static int glnvg__renderCreateTexture(void* uptr, int type, int w, int h, int imageFlags, const unsigned char* data) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + GLNVGtexture* tex = glnvg__allocTexture(gl); + + if (tex == NULL) return 0; + + printf("CreateTexture: Data is null %d\n", (data == NULL)); + +#ifdef NANOVG_GLES2 + // Check for non-power of 2. + if (glnvg__nearestPow2(w) != (unsigned int)w || glnvg__nearestPow2(h) != (unsigned int)h) { + // No repeat + if ((imageFlags & NVG_IMAGE_REPEATX) != 0 || (imageFlags & NVG_IMAGE_REPEATY) != 0) { + printf("Repeat X/Y is not supported for non power-of-two textures (%d x %d)\n", w, h); + imageFlags &= ~(NVG_IMAGE_REPEATX | NVG_IMAGE_REPEATY); + } + // No mips. 
+ if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + printf("Mip-maps is not support for non power-of-two textures (%d x %d)\n", w, h); + imageFlags &= ~NVG_IMAGE_GENERATE_MIPMAPS; + } + } +#endif + + glGenTextures(1, &tex->tex); + tex->width = w; + tex->height = h; + tex->type = type; + tex->flags = imageFlags; + glnvg__bindTexture(gl, tex->tex); + + glPixelStorei(GL_UNPACK_ALIGNMENT,1); +#ifndef NANOVG_GLES2 + glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width); + glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0); + glPixelStorei(GL_UNPACK_SKIP_ROWS, 0); +#endif + +#if defined (NANOVG_GL2) + // GL 1.4 and later has support for generating mipmaps using a tex parameter. + if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE); + } +#endif + + if (type == NVG_TEXTURE_RGBA) + glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data); + else +#if defined(NANOVG_GLES2) || defined (NANOVG_GL2) + glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, data); +#elif defined(NANOVG_GLES3) + glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data); +#else + glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, w, h, 0, GL_RED, GL_UNSIGNED_BYTE, data); +#endif + + if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + if (imageFlags & NVG_IMAGE_NEAREST) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_NEAREST); + } else { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); + } + } else { + if (imageFlags & NVG_IMAGE_NEAREST) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + } else { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + } + } + + if (imageFlags & NVG_IMAGE_NEAREST) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + } else { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + } + + if (imageFlags & NVG_IMAGE_REPEATX) + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); + else + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + + if (imageFlags & NVG_IMAGE_REPEATY) + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); + else + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + glPixelStorei(GL_UNPACK_ALIGNMENT, 4); +#ifndef NANOVG_GLES2 + glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); + glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0); + glPixelStorei(GL_UNPACK_SKIP_ROWS, 0); +#endif + + // The new way to build mipmaps on GLES and GL3 +#if !defined(NANOVG_GL2) + if (imageFlags & NVG_IMAGE_GENERATE_MIPMAPS) { + glGenerateMipmap(GL_TEXTURE_2D); + } +#endif + + glnvg__checkError(gl, "create tex"); + glnvg__bindTexture(gl, 0); + + return tex->id; +} + + +static int glnvg__renderDeleteTexture(void* uptr, int image) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + return glnvg__deleteTexture(gl, image); +} + +static int glnvg__renderUpdateTexture(void* uptr, int image, int x, int y, int w, int h, const unsigned char* data) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + GLNVGtexture* tex = glnvg__findTexture(gl, image); + + if (tex == NULL) return 0; + glnvg__bindTexture(gl, tex->tex); + + glPixelStorei(GL_UNPACK_ALIGNMENT,1); + +#ifndef NANOVG_GLES2 + glPixelStorei(GL_UNPACK_ROW_LENGTH, tex->width); + glPixelStorei(GL_UNPACK_SKIP_PIXELS, x); + glPixelStorei(GL_UNPACK_SKIP_ROWS, y); +#else + // No support for all of skip, need to update a whole row at a time. 
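+    // The update below therefore advances `data` to the first affected row and rewrites
+    // complete rows: x is reset to 0 and w widened to the full texture width.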
+ if (tex->type == NVG_TEXTURE_RGBA) + data += y*tex->width*4; + else + data += y*tex->width; + x = 0; + w = tex->width; +#endif + + if (tex->type == NVG_TEXTURE_RGBA) + glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RGBA, GL_UNSIGNED_BYTE, data); + else +#if defined(NANOVG_GLES2) || defined(NANOVG_GL2) + glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_LUMINANCE, GL_UNSIGNED_BYTE, data); +#else + glTexSubImage2D(GL_TEXTURE_2D, 0, x,y, w,h, GL_RED, GL_UNSIGNED_BYTE, data); +#endif + + glPixelStorei(GL_UNPACK_ALIGNMENT, 4); +#ifndef NANOVG_GLES2 + glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); + glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0); + glPixelStorei(GL_UNPACK_SKIP_ROWS, 0); +#endif + + glnvg__bindTexture(gl, 0); + + return 1; +} + +static int glnvg__renderGetTextureSize(void* uptr, int image, int* w, int* h) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + GLNVGtexture* tex = glnvg__findTexture(gl, image); + if (tex == NULL) return 0; + *w = tex->width; + *h = tex->height; + return 1; +} + +static void glnvg__xformToMat3x4(float* m3, float* t) +{ + m3[0] = t[0]; + m3[1] = t[1]; + m3[2] = 0.0f; + m3[3] = 0.0f; + m3[4] = t[2]; + m3[5] = t[3]; + m3[6] = 0.0f; + m3[7] = 0.0f; + m3[8] = t[4]; + m3[9] = t[5]; + m3[10] = 1.0f; + m3[11] = 0.0f; +} + +static NVGcolor glnvg__premulColor(NVGcolor c) +{ + c.r *= c.a; + c.g *= c.a; + c.b *= c.a; + return c; +} + +static int glnvg__convertPaint(GLNVGcontext* gl, GLNVGfragUniforms* frag, NVGpaint* paint, + NVGscissor* scissor, float width, float fringe, float strokeThr) +{ + GLNVGtexture* tex = NULL; + float invxform[6]; + + memset(frag, 0, sizeof(*frag)); + + frag->innerCol = glnvg__premulColor(paint->innerColor); + frag->outerCol = glnvg__premulColor(paint->outerColor); + + if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f) { + memset(frag->scissorMat, 0, sizeof(frag->scissorMat)); + frag->scissorExt[0] = 1.0f; + frag->scissorExt[1] = 1.0f; + frag->scissorScale[0] = 1.0f; + frag->scissorScale[1] = 1.0f; + } else { + nvgTransformInverse(invxform, scissor->xform); + glnvg__xformToMat3x4(frag->scissorMat, invxform); + frag->scissorExt[0] = scissor->extent[0]; + frag->scissorExt[1] = scissor->extent[1]; + frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe; + frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe; + } + + memcpy(frag->extent, paint->extent, sizeof(frag->extent)); + frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe; + frag->strokeThr = strokeThr; + + if (paint->image != 0) { + tex = glnvg__findTexture(gl, paint->image); + if (tex == NULL) return 0; + if ((tex->flags & NVG_IMAGE_FLIPY) != 0) { + float m1[6], m2[6]; + nvgTransformTranslate(m1, 0.0f, frag->extent[1] * 0.5f); + nvgTransformMultiply(m1, paint->xform); + nvgTransformScale(m2, 1.0f, -1.0f); + nvgTransformMultiply(m2, m1); + nvgTransformTranslate(m1, 0.0f, -frag->extent[1] * 0.5f); + nvgTransformMultiply(m1, m2); + nvgTransformInverse(invxform, m1); + } else { + nvgTransformInverse(invxform, paint->xform); + } + frag->type = NSVG_SHADER_FILLIMG; + + #if NANOVG_GL_USE_UNIFORMBUFFER + if (tex->type == NVG_TEXTURE_RGBA) + frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0 : 1; + else + frag->texType = 2; + #else + if (tex->type == NVG_TEXTURE_RGBA) + frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 
0.0f : 1.0f; + else + frag->texType = 2.0f; + #endif +// printf("frag->texType = %d\n", frag->texType); + } else { + frag->type = NSVG_SHADER_FILLGRAD; + frag->radius = paint->radius; + frag->feather = paint->feather; + nvgTransformInverse(invxform, paint->xform); + } + + glnvg__xformToMat3x4(frag->paintMat, invxform); + + return 1; +} + +static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i); + +static void glnvg__setUniforms(GLNVGcontext* gl, int uniformOffset, int image) +{ + GLNVGtexture* tex = NULL; +#if NANOVG_GL_USE_UNIFORMBUFFER + glBindBufferRange(GL_UNIFORM_BUFFER, GLNVG_FRAG_BINDING, gl->fragBuf, uniformOffset, sizeof(GLNVGfragUniforms)); +#else + GLNVGfragUniforms* frag = nvg__fragUniformPtr(gl, uniformOffset); + glUniform4fv(gl->shader.loc[GLNVG_LOC_FRAG], NANOVG_GL_UNIFORMARRAY_SIZE, &(frag->uniformArray[0][0])); +#endif + + if (image != 0) { + tex = glnvg__findTexture(gl, image); + } + // If no image is set, use empty texture + if (tex == NULL) { + tex = glnvg__findTexture(gl, gl->dummyTex); + } + glnvg__bindTexture(gl, tex != NULL ? tex->tex : 0); + glnvg__checkError(gl, "tex paint tex"); +} + +static void glnvg__renderViewport(void* uptr, float width, float height, float devicePixelRatio) +{ + NVG_NOTUSED(devicePixelRatio); + GLNVGcontext* gl = (GLNVGcontext*)uptr; + gl->view[0] = width; + gl->view[1] = height; +} + +static void glnvg__fill(GLNVGcontext* gl, GLNVGcall* call) +{ + GLNVGpath* paths = &gl->paths[call->pathOffset]; + int i, npaths = call->pathCount; + + // Draw shapes + glEnable(GL_STENCIL_TEST); + glnvg__stencilMask(gl, 0xff); + glnvg__stencilFunc(gl, GL_ALWAYS, 0, 0xff); + glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); + + // set bindpoint for solid loc + glnvg__setUniforms(gl, call->uniformOffset, 0); + glnvg__checkError(gl, "fill simple"); + + glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP); + glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP); + glDisable(GL_CULL_FACE); + for (i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount); + glEnable(GL_CULL_FACE); + + // Draw anti-aliased pixels + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + + glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image); + glnvg__checkError(gl, "fill fill"); + + if (gl->flags & NVG_ANTIALIAS) { + glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff); + glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); + // Draw fringes + for (i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + } + + // Draw fill + glnvg__stencilFunc(gl, GL_NOTEQUAL, 0x0, 0xff); + glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO); + glDrawArrays(GL_TRIANGLE_STRIP, call->triangleOffset, call->triangleCount); + + glDisable(GL_STENCIL_TEST); +} + +static void glnvg__convexFill(GLNVGcontext* gl, GLNVGcall* call) +{ + GLNVGpath* paths = &gl->paths[call->pathOffset]; + int i, npaths = call->pathCount; + + glnvg__setUniforms(gl, call->uniformOffset, call->image); + glnvg__checkError(gl, "convex fill"); + + for (i = 0; i < npaths; i++) { + glDrawArrays(GL_TRIANGLE_FAN, paths[i].fillOffset, paths[i].fillCount); + // Draw fringes + if (paths[i].strokeCount > 0) { + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + } + } +} + +static void glnvg__stroke(GLNVGcontext* gl, GLNVGcall* call) +{ + GLNVGpath* paths = &gl->paths[call->pathOffset]; + int npaths = call->pathCount, i; + + if (gl->flags & NVG_STENCIL_STROKES) { + + glEnable(GL_STENCIL_TEST); + 
glnvg__stencilMask(gl, 0xff); + + // Fill the stroke base without overlap + glnvg__stencilFunc(gl, GL_EQUAL, 0x0, 0xff); + glStencilOp(GL_KEEP, GL_KEEP, GL_INCR); + glnvg__setUniforms(gl, call->uniformOffset + gl->fragSize, call->image); + glnvg__checkError(gl, "stroke fill 0"); + for (i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + + // Draw anti-aliased pixels. + glnvg__setUniforms(gl, call->uniformOffset, call->image); + glnvg__stencilFunc(gl, GL_EQUAL, 0x00, 0xff); + glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); + for (i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + + // Clear stencil buffer. + glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); + glnvg__stencilFunc(gl, GL_ALWAYS, 0x0, 0xff); + glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO); + glnvg__checkError(gl, "stroke fill 1"); + for (i = 0; i < npaths; i++) { + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + } + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + + glDisable(GL_STENCIL_TEST); + +// glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f); + + } else { + glnvg__setUniforms(gl, call->uniformOffset, call->image); + glnvg__checkError(gl, "stroke fill"); + // Draw Strokes + for (i = 0; i < npaths; i++) + glDrawArrays(GL_TRIANGLE_STRIP, paths[i].strokeOffset, paths[i].strokeCount); + } +} + +static void glnvg__triangles(GLNVGcontext* gl, GLNVGcall* call) +{ + glnvg__setUniforms(gl, call->uniformOffset, call->image); + glnvg__checkError(gl, "triangles fill"); + + glDrawArrays(GL_TRIANGLES, call->triangleOffset, call->triangleCount); +} + +static void glnvg__renderCancel(void* uptr) { + GLNVGcontext* gl = (GLNVGcontext*)uptr; + gl->nverts = 0; + gl->npaths = 0; + gl->ncalls = 0; + gl->nuniforms = 0; +} + +static GLenum glnvg_convertBlendFuncFactor(int factor) +{ + if (factor == NVG_ZERO) + return GL_ZERO; + if (factor == NVG_ONE) + return GL_ONE; + if (factor == NVG_SRC_COLOR) + return GL_SRC_COLOR; + if (factor == NVG_ONE_MINUS_SRC_COLOR) + return GL_ONE_MINUS_SRC_COLOR; + if (factor == NVG_DST_COLOR) + return GL_DST_COLOR; + if (factor == NVG_ONE_MINUS_DST_COLOR) + return GL_ONE_MINUS_DST_COLOR; + if (factor == NVG_SRC_ALPHA) + return GL_SRC_ALPHA; + if (factor == NVG_ONE_MINUS_SRC_ALPHA) + return GL_ONE_MINUS_SRC_ALPHA; + if (factor == NVG_DST_ALPHA) + return GL_DST_ALPHA; + if (factor == NVG_ONE_MINUS_DST_ALPHA) + return GL_ONE_MINUS_DST_ALPHA; + if (factor == NVG_SRC_ALPHA_SATURATE) + return GL_SRC_ALPHA_SATURATE; + return GL_INVALID_ENUM; +} + +static GLNVGblend glnvg__blendCompositeOperation(NVGcompositeOperationState op) +{ + GLNVGblend blend; + blend.srcRGB = glnvg_convertBlendFuncFactor(op.srcRGB); + blend.dstRGB = glnvg_convertBlendFuncFactor(op.dstRGB); + blend.srcAlpha = glnvg_convertBlendFuncFactor(op.srcAlpha); + blend.dstAlpha = glnvg_convertBlendFuncFactor(op.dstAlpha); + if (blend.srcRGB == GL_INVALID_ENUM || blend.dstRGB == GL_INVALID_ENUM || blend.srcAlpha == GL_INVALID_ENUM || blend.dstAlpha == GL_INVALID_ENUM) + { + blend.srcRGB = GL_ONE; + blend.dstRGB = GL_ONE_MINUS_SRC_ALPHA; + blend.srcAlpha = GL_ONE; + blend.dstAlpha = GL_ONE_MINUS_SRC_ALPHA; + } + return blend; +} + +static void glnvg__renderFlush(void* uptr) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + int i; + + if (gl->ncalls > 0) { + + // Setup require GL state. 
+ glUseProgram(gl->shader.prog); + + glEnable(GL_CULL_FACE); + glCullFace(GL_BACK); + glFrontFace(GL_CCW); + glEnable(GL_BLEND); + glDisable(GL_DEPTH_TEST); + glDisable(GL_SCISSOR_TEST); + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + glStencilMask(0xffffffff); + glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); + glStencilFunc(GL_ALWAYS, 0, 0xffffffff); + glActiveTexture(GL_TEXTURE0); + glBindTexture(GL_TEXTURE_2D, 0); + #if NANOVG_GL_USE_STATE_FILTER + gl->boundTexture = 0; + gl->stencilMask = 0xffffffff; + gl->stencilFunc = GL_ALWAYS; + gl->stencilFuncRef = 0; + gl->stencilFuncMask = 0xffffffff; + gl->blendFunc.srcRGB = GL_INVALID_ENUM; + gl->blendFunc.srcAlpha = GL_INVALID_ENUM; + gl->blendFunc.dstRGB = GL_INVALID_ENUM; + gl->blendFunc.dstAlpha = GL_INVALID_ENUM; + #endif + +#if NANOVG_GL_USE_UNIFORMBUFFER + // Upload ubo for frag shaders + glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf); + glBufferData(GL_UNIFORM_BUFFER, gl->nuniforms * gl->fragSize, gl->uniforms, GL_STREAM_DRAW); +#endif + + // Upload vertex data +#if defined NANOVG_GL3 + glBindVertexArray(gl->vertArr); +#endif + glBindBuffer(GL_ARRAY_BUFFER, gl->vertBuf); + glBufferData(GL_ARRAY_BUFFER, gl->nverts * sizeof(NVGvertex), gl->verts, GL_STREAM_DRAW); + glEnableVertexAttribArray(0); + glEnableVertexAttribArray(1); + glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(size_t)0); + glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(NVGvertex), (const GLvoid*)(0 + 2*sizeof(float))); + + // Set view and texture just once per frame. + glUniform1i(gl->shader.loc[GLNVG_LOC_TEX], 0); + glUniform2fv(gl->shader.loc[GLNVG_LOC_VIEWSIZE], 1, gl->view); + +#if NANOVG_GL_USE_UNIFORMBUFFER + glBindBuffer(GL_UNIFORM_BUFFER, gl->fragBuf); +#endif + + for (i = 0; i < gl->ncalls; i++) { + GLNVGcall* call = &gl->calls[i]; + glnvg__blendFuncSeparate(gl,&call->blendFunc); + if (call->type == GLNVG_FILL) { + glnvg__fill(gl, call); + } + else if (call->type == GLNVG_CONVEXFILL) { + glnvg__convexFill(gl, call); + } + else if (call->type == GLNVG_STROKE) { + glnvg__stroke(gl, call); + } + else if (call->type == GLNVG_TRIANGLES) { + glnvg__triangles(gl, call); + } + } + + glDisableVertexAttribArray(0); + glDisableVertexAttribArray(1); +#if defined NANOVG_GL3 + glBindVertexArray(0); +#endif + glDisable(GL_CULL_FACE); + glBindBuffer(GL_ARRAY_BUFFER, 0); + glUseProgram(0); + glnvg__bindTexture(gl, 0); + } + + // Reset calls + gl->nverts = 0; + gl->npaths = 0; + gl->ncalls = 0; + gl->nuniforms = 0; +} + +static int glnvg__maxVertCount(const NVGpath* paths, int npaths) +{ + int i, count = 0; + for (i = 0; i < npaths; i++) { + count += paths[i].nfill; + count += paths[i].nstroke; + } + return count; +} + +static GLNVGcall* glnvg__allocCall(GLNVGcontext* gl) +{ + GLNVGcall* ret = NULL; + if (gl->ncalls+1 > gl->ccalls) { + GLNVGcall* calls; + int ccalls = glnvg__maxi(gl->ncalls+1, 128) + gl->ccalls/2; // 1.5x Overallocate + calls = (GLNVGcall*)realloc(gl->calls, sizeof(GLNVGcall) * ccalls); + if (calls == NULL) return NULL; + gl->calls = calls; + gl->ccalls = ccalls; + } + ret = &gl->calls[gl->ncalls++]; + memset(ret, 0, sizeof(GLNVGcall)); + return ret; +} + +static int glnvg__allocPaths(GLNVGcontext* gl, int n) +{ + int ret = 0; + if (gl->npaths+n > gl->cpaths) { + GLNVGpath* paths; + int cpaths = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths/2; // 1.5x Overallocate + paths = (GLNVGpath*)realloc(gl->paths, sizeof(GLNVGpath) * cpaths); + if (paths == NULL) return -1; + gl->paths = paths; + gl->cpaths = cpaths; + } + ret = 
gl->npaths; + gl->npaths += n; + return ret; +} + +static int glnvg__allocVerts(GLNVGcontext* gl, int n) +{ + int ret = 0; + if (gl->nverts+n > gl->cverts) { + NVGvertex* verts; + int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate + verts = (NVGvertex*)realloc(gl->verts, sizeof(NVGvertex) * cverts); + if (verts == NULL) return -1; + gl->verts = verts; + gl->cverts = cverts; + } + ret = gl->nverts; + gl->nverts += n; + return ret; +} + +static int glnvg__allocFragUniforms(GLNVGcontext* gl, int n) +{ + int ret = 0, structSize = gl->fragSize; + if (gl->nuniforms+n > gl->cuniforms) { + unsigned char* uniforms; + int cuniforms = glnvg__maxi(gl->nuniforms+n, 128) + gl->cuniforms/2; // 1.5x Overallocate + uniforms = (unsigned char*)realloc(gl->uniforms, structSize * cuniforms); + if (uniforms == NULL) return -1; + gl->uniforms = uniforms; + gl->cuniforms = cuniforms; + } + ret = gl->nuniforms * structSize; + gl->nuniforms += n; + return ret; +} + +static GLNVGfragUniforms* nvg__fragUniformPtr(GLNVGcontext* gl, int i) +{ + return (GLNVGfragUniforms*)&gl->uniforms[i]; +} + +static void glnvg__vset(NVGvertex* vtx, float x, float y, float u, float v) +{ + vtx->x = x; + vtx->y = y; + vtx->u = u; + vtx->v = v; +} + +static void glnvg__renderFill(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe, + const float* bounds, const NVGpath* paths, int npaths) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + GLNVGcall* call = glnvg__allocCall(gl); + NVGvertex* quad; + GLNVGfragUniforms* frag; + int i, maxverts, offset; + + if (call == NULL) return; + + call->type = GLNVG_FILL; + call->triangleCount = 4; + call->pathOffset = glnvg__allocPaths(gl, npaths); + if (call->pathOffset == -1) goto error; + call->pathCount = npaths; + call->image = paint->image; + call->blendFunc = glnvg__blendCompositeOperation(compositeOperation); + + if (npaths == 1 && paths[0].convex) + { + call->type = GLNVG_CONVEXFILL; + call->triangleCount = 0; // Bounding box fill quad not needed for convex fill + } + + // Allocate vertices for all the paths. 
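+    // Worst case is every fill and stroke vertex of every path, plus the 4 vertices of the
+    // bounding-box quad (triangleCount) drawn by the stencil-then-cover fill pass.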
+ maxverts = glnvg__maxVertCount(paths, npaths) + call->triangleCount; + offset = glnvg__allocVerts(gl, maxverts); + if (offset == -1) goto error; + + for (i = 0; i < npaths; i++) { + GLNVGpath* copy = &gl->paths[call->pathOffset + i]; + const NVGpath* path = &paths[i]; + memset(copy, 0, sizeof(GLNVGpath)); + if (path->nfill > 0) { + copy->fillOffset = offset; + copy->fillCount = path->nfill; + memcpy(&gl->verts[offset], path->fill, sizeof(NVGvertex) * path->nfill); + offset += path->nfill; + } + if (path->nstroke > 0) { + copy->strokeOffset = offset; + copy->strokeCount = path->nstroke; + memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke); + offset += path->nstroke; + } + } + + // Setup uniforms for draw calls + if (call->type == GLNVG_FILL) { + // Quad + call->triangleOffset = offset; + quad = &gl->verts[call->triangleOffset]; + glnvg__vset(&quad[0], bounds[2], bounds[3], 0.5f, 1.0f); + glnvg__vset(&quad[1], bounds[2], bounds[1], 0.5f, 1.0f); + glnvg__vset(&quad[2], bounds[0], bounds[3], 0.5f, 1.0f); + glnvg__vset(&quad[3], bounds[0], bounds[1], 0.5f, 1.0f); + + call->uniformOffset = glnvg__allocFragUniforms(gl, 2); + if (call->uniformOffset == -1) goto error; + // Simple shader for stencil + frag = nvg__fragUniformPtr(gl, call->uniformOffset); + memset(frag, 0, sizeof(*frag)); + frag->strokeThr = -1.0f; + frag->type = NSVG_SHADER_SIMPLE; + // Fill shader + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe, -1.0f); + } else { + call->uniformOffset = glnvg__allocFragUniforms(gl, 1); + if (call->uniformOffset == -1) goto error; + // Fill shader + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe, -1.0f); + } + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. + if (gl->ncalls > 0) gl->ncalls--; +} + +static void glnvg__renderStroke(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, float fringe, + float strokeWidth, const NVGpath* paths, int npaths) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + GLNVGcall* call = glnvg__allocCall(gl); + int i, maxverts, offset; + + if (call == NULL) return; + + call->type = GLNVG_STROKE; + call->pathOffset = glnvg__allocPaths(gl, npaths); + if (call->pathOffset == -1) goto error; + call->pathCount = npaths; + call->image = paint->image; + call->blendFunc = glnvg__blendCompositeOperation(compositeOperation); + + // Allocate vertices for all the paths. 
+ maxverts = glnvg__maxVertCount(paths, npaths); + offset = glnvg__allocVerts(gl, maxverts); + if (offset == -1) goto error; + + for (i = 0; i < npaths; i++) { + GLNVGpath* copy = &gl->paths[call->pathOffset + i]; + const NVGpath* path = &paths[i]; + memset(copy, 0, sizeof(GLNVGpath)); + if (path->nstroke) { + copy->strokeOffset = offset; + copy->strokeCount = path->nstroke; + memcpy(&gl->verts[offset], path->stroke, sizeof(NVGvertex) * path->nstroke); + offset += path->nstroke; + } + } + + if (gl->flags & NVG_STENCIL_STROKES) { + // Fill shader + call->uniformOffset = glnvg__allocFragUniforms(gl, 2); + if (call->uniformOffset == -1) goto error; + + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f); + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, strokeWidth, fringe, 1.0f - 0.5f/255.0f); + + } else { + // Fill shader + call->uniformOffset = glnvg__allocFragUniforms(gl, 1); + if (call->uniformOffset == -1) goto error; + glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe, -1.0f); + } + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. + if (gl->ncalls > 0) gl->ncalls--; +} + +static void glnvg__renderTriangles(void* uptr, NVGpaint* paint, NVGcompositeOperationState compositeOperation, NVGscissor* scissor, + const NVGvertex* verts, int nverts, float fringe) +{ + GLNVGcontext* gl = (GLNVGcontext*)uptr; + GLNVGcall* call = glnvg__allocCall(gl); + GLNVGfragUniforms* frag; + + if (call == NULL) return; + + call->type = GLNVG_TRIANGLES; + call->image = paint->image; + call->blendFunc = glnvg__blendCompositeOperation(compositeOperation); + + // Allocate vertices for all the paths. + call->triangleOffset = glnvg__allocVerts(gl, nverts); + if (call->triangleOffset == -1) goto error; + call->triangleCount = nverts; + + memcpy(&gl->verts[call->triangleOffset], verts, sizeof(NVGvertex) * nverts); + + // Fill shader + call->uniformOffset = glnvg__allocFragUniforms(gl, 1); + if (call->uniformOffset == -1) goto error; + frag = nvg__fragUniformPtr(gl, call->uniformOffset); + glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, fringe, -1.0f); + frag->type = NSVG_SHADER_IMG; + + return; + +error: + // We get here if call alloc was ok, but something else is not. + // Roll back the last call to prevent drawing it. 
+    if (gl->ncalls > 0) gl->ncalls--;
+}
+
+static void glnvg__renderDelete(void* uptr)
+{
+    GLNVGcontext* gl = (GLNVGcontext*)uptr;
+    int i;
+    if (gl == NULL) return;
+
+    glnvg__deleteShader(&gl->shader);
+
+#if NANOVG_GL3
+#if NANOVG_GL_USE_UNIFORMBUFFER
+    if (gl->fragBuf != 0)
+        glDeleteBuffers(1, &gl->fragBuf);
+#endif
+    if (gl->vertArr != 0)
+        glDeleteVertexArrays(1, &gl->vertArr);
+#endif
+    if (gl->vertBuf != 0)
+        glDeleteBuffers(1, &gl->vertBuf);
+
+    for (i = 0; i < gl->ntextures; i++) {
+        if (gl->textures[i].tex != 0 && (gl->textures[i].flags & NVG_IMAGE_NODELETE) == 0)
+            glDeleteTextures(1, &gl->textures[i].tex);
+    }
+    free(gl->textures);
+
+    free(gl->paths);
+    free(gl->verts);
+    free(gl->uniforms);
+    free(gl->calls);
+
+    free(gl);
+}
+
+
+#if defined NANOVG_GL2
+NVGcontext* nvgCreateGL2(int flags)
+#elif defined NANOVG_GL3
+NVGcontext* nvgCreateGL3(int flags)
+#elif defined NANOVG_GLES2
+NVGcontext* nvgCreateGLES2(int flags)
+#elif defined NANOVG_GLES3
+NVGcontext* nvgCreateGLES3(int flags)
+#endif
+{
+    NVGparams params;
+    NVGcontext* ctx = NULL;
+    GLNVGcontext* gl = (GLNVGcontext*)malloc(sizeof(GLNVGcontext));
+    if (gl == NULL) goto error;
+    memset(gl, 0, sizeof(GLNVGcontext));
+
+    memset(&params, 0, sizeof(params));
+    params.renderCreate = glnvg__renderCreate;
+    params.renderCreateTexture = glnvg__renderCreateTexture;
+    params.renderDeleteTexture = glnvg__renderDeleteTexture;
+    params.renderUpdateTexture = glnvg__renderUpdateTexture;
+    params.renderGetTextureSize = glnvg__renderGetTextureSize;
+    params.renderViewport = glnvg__renderViewport;
+    params.renderCancel = glnvg__renderCancel;
+    params.renderFlush = glnvg__renderFlush;
+    params.renderFill = glnvg__renderFill;
+    params.renderStroke = glnvg__renderStroke;
+    params.renderTriangles = glnvg__renderTriangles;
+    params.renderDelete = glnvg__renderDelete;
+    params.userPtr = gl;
+    params.edgeAntiAlias = flags & NVG_ANTIALIAS ? 1 : 0;
+
+    gl->flags = flags;
+
+    ctx = nvgCreateInternal(&params);
+    if (ctx == NULL) goto error;
+
+    return ctx;
+
+error:
+    // 'gl' is freed by nvgDeleteInternal.
+ if (ctx != NULL) nvgDeleteInternal(ctx); + return NULL; +} + +#if defined NANOVG_GL2 +void nvgDeleteGL2(NVGcontext* ctx) +#elif defined NANOVG_GL3 +void nvgDeleteGL3(NVGcontext* ctx) +#elif defined NANOVG_GLES2 +void nvgDeleteGLES2(NVGcontext* ctx) +#elif defined NANOVG_GLES3 +void nvgDeleteGLES3(NVGcontext* ctx) +#endif +{ + nvgDeleteInternal(ctx); +} + +#if defined NANOVG_GL2 +int nvglCreateImageFromHandleGL2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags) +#elif defined NANOVG_GL3 +int nvglCreateImageFromHandleGL3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags) +#elif defined NANOVG_GLES2 +int nvglCreateImageFromHandleGLES2(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags) +#elif defined NANOVG_GLES3 +int nvglCreateImageFromHandleGLES3(NVGcontext* ctx, GLuint textureId, int w, int h, int imageFlags) +#endif +{ + GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr; + GLNVGtexture* tex = glnvg__allocTexture(gl); + + if (tex == NULL) return 0; + + tex->type = NVG_TEXTURE_RGBA; + tex->tex = textureId; + tex->flags = imageFlags; + tex->width = w; + tex->height = h; + + return tex->id; +} + +#if defined NANOVG_GL2 +GLuint nvglImageHandleGL2(NVGcontext* ctx, int image) +#elif defined NANOVG_GL3 +GLuint nvglImageHandleGL3(NVGcontext* ctx, int image) +#elif defined NANOVG_GLES2 +GLuint nvglImageHandleGLES2(NVGcontext* ctx, int image) +#elif defined NANOVG_GLES3 +GLuint nvglImageHandleGLES3(NVGcontext* ctx, int image) +#endif +{ + GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(ctx)->userPtr; + GLNVGtexture* tex = glnvg__findTexture(gl, image); + return tex->tex; +} + +#endif /* USE_OPENGL */ + +#endif /* NANOVG_GL_IMPLEMENTATION */ diff --git a/troposphere/daybreak/nanovg/shaders/fill_aa_fsh.glsl b/troposphere/daybreak/nanovg/shaders/fill_aa_fsh.glsl new file mode 100644 index 000000000..01604450d --- /dev/null +++ b/troposphere/daybreak/nanovg/shaders/fill_aa_fsh.glsl @@ -0,0 +1,83 @@ +#version 460 + +layout(binding = 0) uniform sampler2D tex; + +layout(std140, binding = 0) uniform frag { + mat3 scissorMat; + mat3 paintMat; + vec4 innerCol; + vec4 outerCol; + vec2 scissorExt; + vec2 scissorScale; + vec2 extent; + float radius; + float feather; + float strokeMult; + float strokeThr; + int texType; + int type; +}; + +layout(location = 0) in vec2 ftcoord; +layout(location = 1) in vec2 fpos; +layout(location = 0) out vec4 outColor; + +float sdroundrect(vec2 pt, vec2 ext, float rad) { + vec2 ext2 = ext - vec2(rad,rad); + vec2 d = abs(pt) - ext2; + return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad; +} + +// Scissoring +float scissorMask(vec2 p) { + vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt); + sc = vec2(0.5,0.5) - sc * scissorScale; + return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0); +} + +// Stroke - from [0..1] to clipped pyramid, where the slope is 1px. 
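+// ftcoord.x runs 0..1 across the stroke, so (1.0 - abs(ftcoord.x*2.0 - 1.0)) peaks at the centre
+// line; scaling by strokeMult ((strokeWidth + fringe) / (2*fringe)) and clamping to 1.0 leaves
+// roughly a one-pixel anti-aliased ramp at each edge, and ftcoord.y fades the end caps likewise.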
+float strokeMask() { + return min(1.0, (1.0-abs(ftcoord.x*2.0-1.0))*strokeMult) * min(1.0, ftcoord.y); +} + +void main(void) { + vec4 result; + float scissor = scissorMask(fpos); + float strokeAlpha = strokeMask(); + + if (strokeAlpha < strokeThr) discard; + + if (type == 0) { // Gradient + // Calculate gradient color using box gradient + vec2 pt = (paintMat * vec3(fpos,1.0)).xy; + float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0); + vec4 color = mix(innerCol,outerCol,d); + // Combine alpha + color *= strokeAlpha * scissor; + result = color; + } else if (type == 1) { // Image + // Calculate color fron texture + vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent; + vec4 color = texture(tex, pt); + + if (texType == 1) color = vec4(color.xyz*color.w,color.w); + if (texType == 2) color = vec4(color.x); + // Apply color tint and alpha. + color *= innerCol; + // Combine alpha + color *= strokeAlpha * scissor; + result = color; + } else if (type == 2) { // Stencil fill + result = vec4(1,1,1,1); + } else if (type == 3) { // Textured tris + + vec4 color = texture(tex, ftcoord); + + if (texType == 1) color = vec4(color.xyz*color.w,color.w); + if (texType == 2) color = vec4(color.x); + color *= scissor; + result = color * innerCol; + } + + outColor = result; +}; \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/shaders/fill_fsh.glsl b/troposphere/daybreak/nanovg/shaders/fill_fsh.glsl new file mode 100644 index 000000000..6b6e2cc80 --- /dev/null +++ b/troposphere/daybreak/nanovg/shaders/fill_fsh.glsl @@ -0,0 +1,76 @@ +#version 460 + +layout(binding = 0) uniform sampler2D tex; + +layout(std140, binding = 0) uniform frag { + mat3 scissorMat; + mat3 paintMat; + vec4 innerCol; + vec4 outerCol; + vec2 scissorExt; + vec2 scissorScale; + vec2 extent; + float radius; + float feather; + float strokeMult; + float strokeThr; + int texType; + int type; +}; + +layout(location = 0) in vec2 ftcoord; +layout(location = 1) in vec2 fpos; +layout(location = 0) out vec4 outColor; + +float sdroundrect(vec2 pt, vec2 ext, float rad) { + vec2 ext2 = ext - vec2(rad,rad); + vec2 d = abs(pt) - ext2; + return min(max(d.x,d.y),0.0) + length(max(d,0.0)) - rad; +} + +// Scissoring +float scissorMask(vec2 p) { + vec2 sc = (abs((scissorMat * vec3(p,1.0)).xy) - scissorExt); + sc = vec2(0.5,0.5) - sc * scissorScale; + return clamp(sc.x,0.0,1.0) * clamp(sc.y,0.0,1.0); +} + +void main(void) { + vec4 result; + float scissor = scissorMask(fpos); + float strokeAlpha = 1.0; + + if (type == 0) { // Gradient + // Calculate gradient color using box gradient + vec2 pt = (paintMat * vec3(fpos,1.0)).xy; + float d = clamp((sdroundrect(pt, extent, radius) + feather*0.5) / feather, 0.0, 1.0); + vec4 color = mix(innerCol,outerCol,d); + // Combine alpha + color *= strokeAlpha * scissor; + result = color; + } else if (type == 1) { // Image + // Calculate color fron texture + vec2 pt = (paintMat * vec3(fpos,1.0)).xy / extent; + vec4 color = texture(tex, pt); + + if (texType == 1) color = vec4(color.xyz*color.w,color.w); + if (texType == 2) color = vec4(color.x); + // Apply color tint and alpha. 
+ color *= innerCol; + // Combine alpha + color *= strokeAlpha * scissor; + result = color; + } else if (type == 2) { // Stencil fill + result = vec4(1,1,1,1); + } else if (type == 3) { // Textured tris + + vec4 color = texture(tex, ftcoord); + + if (texType == 1) color = vec4(color.xyz*color.w,color.w); + if (texType == 2) color = vec4(color.x); + color *= scissor; + result = color * innerCol; + } + + outColor = result; +}; \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/shaders/fill_vsh.glsl b/troposphere/daybreak/nanovg/shaders/fill_vsh.glsl new file mode 100644 index 000000000..1e711a71c --- /dev/null +++ b/troposphere/daybreak/nanovg/shaders/fill_vsh.glsl @@ -0,0 +1,17 @@ +#version 460 + +layout (location = 0) in vec2 vertex; +layout (location = 1) in vec2 tcoord; +layout (location = 0) out vec2 ftcoord; +layout (location = 1) out vec2 fpos; + +layout (std140, binding = 0) uniform View +{ + vec2 size; +} view; + +void main(void) { + ftcoord = tcoord; + fpos = vertex; + gl_Position = vec4(2.0*vertex.x/view.size.x - 1.0, 1.0 - 2.0*vertex.y/view.size.y, 0, 1); +}; \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/source/dk_renderer.cpp b/troposphere/daybreak/nanovg/source/dk_renderer.cpp new file mode 100644 index 000000000..c9706a4de --- /dev/null +++ b/troposphere/daybreak/nanovg/source/dk_renderer.cpp @@ -0,0 +1,545 @@ +#include "dk_renderer.hpp" + +#include +#include +#include +#include +#include +#include + +#define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES /* Enforces GLSL std140/std430 alignment rules for glm types. */ +#define GLM_FORCE_INTRINSICS /* Enables usage of SIMD CPU instructions (requiring the above as well). */ +#include + +namespace nvg { + + namespace { + + constexpr std::array VertexBufferState = { DkVtxBufferState{sizeof(NVGvertex), 0}, }; + + constexpr std::array VertexAttribState = { + DkVtxAttribState{0, 0, offsetof(NVGvertex, x), DkVtxAttribSize_2x32, DkVtxAttribType_Float, 0}, + DkVtxAttribState{0, 0, offsetof(NVGvertex, u), DkVtxAttribSize_2x32, DkVtxAttribType_Float, 0}, + }; + + struct View { + glm::vec2 size; + }; + + void UpdateImage(dk::Image &image, CMemPool &scratchPool, dk::Device device, dk::Queue transferQueue, int type, int x, int y, int w, int h, const u8 *data) { + /* Do not proceed if no data is provided upfront. */ + if (data == nullptr) { + return; + } + + /* Allocate memory from the pool for the image. */ + const size_t imageSize = type == NVG_TEXTURE_RGBA ? w * h * 4 : w * h; + CMemPool::Handle tempimgmem = scratchPool.allocate(imageSize, DK_IMAGE_LINEAR_STRIDE_ALIGNMENT); + memcpy(tempimgmem.getCpuAddr(), data, imageSize); + + dk::UniqueCmdBuf tempcmdbuf = dk::CmdBufMaker{device}.create(); + CMemPool::Handle tempcmdmem = scratchPool.allocate(DK_MEMBLOCK_ALIGNMENT); + tempcmdbuf.addMemory(tempcmdmem.getMemBlock(), tempcmdmem.getOffset(), tempcmdmem.getSize()); + + dk::ImageView imageView{image}; + tempcmdbuf.copyBufferToImage({ tempimgmem.getGpuAddr() }, imageView, { static_cast(x), static_cast(y), 0, static_cast(w), static_cast(h), 1 }); + + transferQueue.submitCommands(tempcmdbuf.finishList()); + transferQueue.waitIdle(); + + /* Destroy temp mem. */ + tempcmdmem.destroy(); + tempimgmem.destroy(); + } + + } + + Texture::Texture(int id) : m_id(id) { /* ... 
*/ } + + Texture::~Texture() { + m_image_mem.destroy(); + } + + void Texture::Initialize(CMemPool &image_pool, CMemPool &scratch_pool, dk::Device device, dk::Queue queue, int type, int w, int h, int image_flags, const u8 *data) { + m_texture_descriptor = { + .width = w, + .height = h, + .type = type, + .flags = image_flags, + }; + + /* Create an image layout. */ + dk::ImageLayout layout; + auto layout_maker = dk::ImageLayoutMaker{device}.setFlags(0).setDimensions(w, h); + if (type == NVG_TEXTURE_RGBA) { + layout_maker.setFormat(DkImageFormat_RGBA8_Unorm); + } else { + layout_maker.setFormat(DkImageFormat_R8_Unorm); + } + layout_maker.initialize(layout); + + /* Initialize image. */ + m_image_mem = image_pool.allocate(layout.getSize(), layout.getAlignment()); + m_image.initialize(layout, m_image_mem.getMemBlock(), m_image_mem.getOffset()); + m_image_descriptor.initialize(m_image); + + /* Only update the image if the data isn't null. */ + if (data != nullptr) { + UpdateImage(m_image, scratch_pool, device, queue, type, 0, 0, w, h, data); + } + } + + int Texture::GetId() { + return m_id; + } + + const DKNVGtextureDescriptor &Texture::GetDescriptor() { + return m_texture_descriptor; + } + + dk::Image &Texture::GetImage() { + return m_image; + } + + dk::ImageDescriptor &Texture::GetImageDescriptor() { + return m_image_descriptor; + } + + DkRenderer::DkRenderer(unsigned int view_width, unsigned int view_height, dk::Device device, dk::Queue queue, CMemPool &image_mem_pool, CMemPool &code_mem_pool, CMemPool &data_mem_pool) : + m_view_width(view_width), m_view_height(view_height), m_device(device), m_queue(queue), m_image_mem_pool(image_mem_pool), m_code_mem_pool(code_mem_pool), m_data_mem_pool(data_mem_pool), m_image_descriptor_mappings({0}) + { + /* Create a dynamic command buffer and allocate memory for it. */ + m_dyn_cmd_buf = dk::CmdBufMaker{m_device}.create(); + m_dyn_cmd_mem.allocate(m_data_mem_pool, DynamicCmdSize); + + m_image_descriptor_set.allocate(m_data_mem_pool); + m_sampler_descriptor_set.allocate(m_data_mem_pool); + + m_view_uniform_buffer = m_data_mem_pool.allocate(sizeof(View), DK_UNIFORM_BUF_ALIGNMENT); + m_frag_uniform_buffer = m_data_mem_pool.allocate(sizeof(FragmentUniformSize), DK_UNIFORM_BUF_ALIGNMENT); + + /* Create and bind preset samplers. */ + dk::UniqueCmdBuf init_cmd_buf = dk::CmdBufMaker{m_device}.create(); + CMemPool::Handle init_cmd_mem = m_data_mem_pool.allocate(DK_MEMBLOCK_ALIGNMENT); + init_cmd_buf.addMemory(init_cmd_mem.getMemBlock(), init_cmd_mem.getOffset(), init_cmd_mem.getSize()); + + for (u8 i = 0; i < SamplerType_Total; i++) { + const DkFilter filter = (i & SamplerType_Nearest) ? DkFilter_Nearest : DkFilter_Linear; + const DkMipFilter mip_filter = (i & SamplerType_Nearest) ? DkMipFilter_Nearest : DkMipFilter_Linear; + const DkWrapMode u_wrap_mode = (i & SamplerType_RepeatX) ? DkWrapMode_Repeat : DkWrapMode_ClampToEdge; + const DkWrapMode v_wrap_mode = (i & SamplerType_RepeatY) ? DkWrapMode_Repeat : DkWrapMode_ClampToEdge; + + auto sampler = dk::Sampler{}; + auto sampler_descriptor = dk::SamplerDescriptor{}; + sampler.setFilter(filter, filter, (i & SamplerType_MipFilter) ? mip_filter : DkMipFilter_None); + sampler.setWrapMode(u_wrap_mode, v_wrap_mode); + sampler_descriptor.initialize(sampler); + m_sampler_descriptor_set.update(init_cmd_buf, i, sampler_descriptor); + } + + /* Flush the descriptor cache. 
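+           (The DkInvalidateFlags_Descriptors barrier below invalidates the GPU's cached
+           descriptors so the sampler descriptors written above are picked up once bound.)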
*/ + init_cmd_buf.barrier(DkBarrier_None, DkInvalidateFlags_Descriptors); + + m_sampler_descriptor_set.bindForSamplers(init_cmd_buf); + m_image_descriptor_set.bindForImages(init_cmd_buf); + + m_queue.submitCommands(init_cmd_buf.finishList()); + m_queue.waitIdle(); + + init_cmd_mem.destroy(); + init_cmd_buf.destroy(); + } + + DkRenderer::~DkRenderer() { + if (m_vertex_buffer) { + m_vertex_buffer->destroy(); + } + + m_view_uniform_buffer.destroy(); + m_frag_uniform_buffer.destroy(); + m_textures.clear(); + } + + int DkRenderer::AcquireImageDescriptor(std::shared_ptr texture, int image) { + int free_image_descriptor = m_last_image_descriptor + 1; + int mapping = 0; + + for (int desc = 0; desc <= m_last_image_descriptor; desc++) { + mapping = m_image_descriptor_mappings[desc]; + + /* We've found the image descriptor requested. */ + if (mapping == image) { + return desc; + } + + /* Update the free image descriptor. */ + if (mapping == 0 && free_image_descriptor == m_last_image_descriptor + 1) { + free_image_descriptor = desc; + } + } + + /* No descriptors are free. */ + if (free_image_descriptor >= static_cast(MaxImages)) { + return -1; + } + + /* Update descriptor sets. */ + m_image_descriptor_set.update(m_dyn_cmd_buf, free_image_descriptor, texture->GetImageDescriptor()); + + /* Flush the descriptor cache. */ + m_dyn_cmd_buf.barrier(DkBarrier_None, DkInvalidateFlags_Descriptors); + + /* Update the map. */ + m_image_descriptor_mappings[free_image_descriptor] = image; + m_last_image_descriptor = free_image_descriptor; + return free_image_descriptor; + } + + void DkRenderer::FreeImageDescriptor(int image) { + for (int desc = 0; desc <= m_last_image_descriptor; desc++) { + if (m_image_descriptor_mappings[desc] == image) { + m_image_descriptor_mappings[desc] = 0; + } + } + } + + void DkRenderer::UpdateVertexBuffer(const void *data, size_t size) { + /* Destroy the existing vertex buffer if it is too small. */ + if (m_vertex_buffer && m_vertex_buffer->getSize() < size) { + m_vertex_buffer->destroy(); + m_vertex_buffer.reset(); + } + + /* Create a new buffer if needed. */ + if (!m_vertex_buffer) { + m_vertex_buffer = m_data_mem_pool.allocate(size); + } + + /* Copy data to the vertex buffer if it exists. */ + if (m_vertex_buffer) { + memcpy(m_vertex_buffer->getCpuAddr(), data, size); + } + } + + void DkRenderer::SetUniforms(const DKNVGcontext &ctx, int offset, int image) { + m_dyn_cmd_buf.pushConstants(m_frag_uniform_buffer.getGpuAddr(), m_frag_uniform_buffer.getSize(), 0, ctx.fragSize, ctx.uniforms + offset); + m_dyn_cmd_buf.bindUniformBuffer(DkStage_Fragment, 0, m_frag_uniform_buffer.getGpuAddr(), m_frag_uniform_buffer.getSize()); + + /* Attempt to find a texture. */ + const auto texture = this->FindTexture(image); + if (texture == nullptr) { + return; + } + + /* Acquire an image descriptor. 
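+           AcquireImageDescriptor maps the NanoVG image id onto a slot in the image descriptor
+           set, reusing the slot it already occupies or claiming a free one, and returns -1
+           once all MaxImages slots are taken.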
*/ + const int image_desc_id = this->AcquireImageDescriptor(texture, image); + if (image_desc_id == -1) { + return; + } + + const int image_flags = texture->GetDescriptor().flags; + uint32_t sampler_id = 0; + + if (image_flags & NVG_IMAGE_GENERATE_MIPMAPS) sampler_id |= SamplerType_MipFilter; + if (image_flags & NVG_IMAGE_NEAREST) sampler_id |= SamplerType_Nearest; + if (image_flags & NVG_IMAGE_REPEATX) sampler_id |= SamplerType_RepeatX; + if (image_flags & NVG_IMAGE_REPEATY) sampler_id |= SamplerType_RepeatY; + + m_dyn_cmd_buf.bindTextures(DkStage_Fragment, 0, dkMakeTextureHandle(image_desc_id, sampler_id)); + } + + void DkRenderer::DrawFill(const DKNVGcontext &ctx, const DKNVGcall &call) { + DKNVGpath *paths = &ctx.paths[call.pathOffset]; + int npaths = call.pathCount; + + /* Set the stencils to be used. */ + m_dyn_cmd_buf.setStencil(DkFace_FrontAndBack, 0xFF, 0x0, 0xFF); + + /* Set the depth stencil state. */ + auto depth_stencil_state = dk::DepthStencilState{} + .setStencilTestEnable(true) + .setStencilFrontCompareOp(DkCompareOp_Always) + .setStencilFrontFailOp(DkStencilOp_Keep) + .setStencilFrontDepthFailOp(DkStencilOp_Keep) + .setStencilFrontPassOp(DkStencilOp_IncrWrap) + .setStencilBackCompareOp(DkCompareOp_Always) + .setStencilBackFailOp(DkStencilOp_Keep) + .setStencilBackDepthFailOp(DkStencilOp_Keep) + .setStencilBackPassOp(DkStencilOp_DecrWrap); + m_dyn_cmd_buf.bindDepthStencilState(depth_stencil_state); + + /* Configure for shape drawing. */ + m_dyn_cmd_buf.bindColorWriteState(dk::ColorWriteState{}.setMask(0, 0)); + this->SetUniforms(ctx, call.uniformOffset, 0); + m_dyn_cmd_buf.bindRasterizerState(dk::RasterizerState{}.setCullMode(DkFace_None)); + + /* Draw vertices. */ + for (int i = 0; i < npaths; i++) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleFan, paths[i].fillCount, 1, paths[i].fillOffset, 0); + } + + m_dyn_cmd_buf.bindColorWriteState(dk::ColorWriteState{}); + this->SetUniforms(ctx, call.uniformOffset + ctx.fragSize, call.image); + m_dyn_cmd_buf.bindRasterizerState(dk::RasterizerState{}); + + if (ctx.flags & NVG_ANTIALIAS) { + /* Configure stencil anti-aliasing. */ + depth_stencil_state + .setStencilFrontCompareOp(DkCompareOp_Equal) + .setStencilFrontFailOp(DkStencilOp_Keep) + .setStencilFrontDepthFailOp(DkStencilOp_Keep) + .setStencilFrontPassOp(DkStencilOp_Keep) + .setStencilBackCompareOp(DkCompareOp_Equal) + .setStencilBackFailOp(DkStencilOp_Keep) + .setStencilBackDepthFailOp(DkStencilOp_Keep) + .setStencilBackPassOp(DkStencilOp_Keep); + m_dyn_cmd_buf.bindDepthStencilState(depth_stencil_state); + + /* Draw fringes. */ + for (int i = 0; i < npaths; i++) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleStrip, paths[i].strokeCount, 1, paths[i].strokeOffset, 0); + } + } + + /* Configure and draw fill. */ + depth_stencil_state + .setStencilFrontCompareOp(DkCompareOp_NotEqual) + .setStencilFrontFailOp(DkStencilOp_Zero) + .setStencilFrontDepthFailOp(DkStencilOp_Zero) + .setStencilFrontPassOp(DkStencilOp_Zero) + .setStencilBackCompareOp(DkCompareOp_NotEqual) + .setStencilBackFailOp(DkStencilOp_Zero) + .setStencilBackDepthFailOp(DkStencilOp_Zero) + .setStencilBackPassOp(DkStencilOp_Zero); + m_dyn_cmd_buf.bindDepthStencilState(depth_stencil_state); + + m_dyn_cmd_buf.draw(DkPrimitive_TriangleStrip, call.triangleCount, 1, call.triangleOffset, 0); + + /* Reset the depth stencil state to default. 
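Binding a default dk::DepthStencilState ensures the stencil configuration used for this fill does not leak into subsequent draw calls.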
*/ + m_dyn_cmd_buf.bindDepthStencilState(dk::DepthStencilState{}); + } + + void DkRenderer::DrawConvexFill(const DKNVGcontext &ctx, const DKNVGcall &call) { + DKNVGpath *paths = &ctx.paths[call.pathOffset]; + int npaths = call.pathCount; + + this->SetUniforms(ctx, call.uniformOffset, call.image); + + for (int i = 0; i < npaths; i++) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleFan, paths[i].fillCount, 1, paths[i].fillOffset, 0); + + /* Draw fringes. */ + if (paths[i].strokeCount > 0) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleStrip, paths[i].strokeCount, 1, paths[i].strokeOffset, 0); + } + } + } + + void DkRenderer::DrawStroke(const DKNVGcontext &ctx, const DKNVGcall &call) { + DKNVGpath* paths = &ctx.paths[call.pathOffset]; + int npaths = call.pathCount; + + if (ctx.flags & NVG_STENCIL_STROKES) { + /* Set the stencil to be used. */ + m_dyn_cmd_buf.setStencil(DkFace_Front, 0xFF, 0x0, 0xFF); + + /* Configure for filling the stroke base without overlap. */ + auto depth_stencil_state = dk::DepthStencilState{} + .setStencilTestEnable(true) + .setStencilFrontCompareOp(DkCompareOp_Equal) + .setStencilFrontFailOp(DkStencilOp_Keep) + .setStencilFrontDepthFailOp(DkStencilOp_Keep) + .setStencilFrontPassOp(DkStencilOp_Incr); + m_dyn_cmd_buf.bindDepthStencilState(depth_stencil_state); + this->SetUniforms(ctx, call.uniformOffset + ctx.fragSize, call.image); + + /* Draw vertices. */ + for (int i = 0; i < npaths; i++) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleStrip, paths[i].strokeCount, 1, paths[i].strokeOffset, 0); + } + + /* Configure for drawing anti-aliased pixels. */ + depth_stencil_state.setStencilFrontPassOp(DkStencilOp_Keep); + m_dyn_cmd_buf.bindDepthStencilState(depth_stencil_state); + this->SetUniforms(ctx, call.uniformOffset, call.image); + + /* Draw vertices. */ + for (int i = 0; i < npaths; i++) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleStrip, paths[i].strokeCount, 1, paths[i].strokeOffset, 0); + } + + /* Configure for clearing the stencil buffer. */ + depth_stencil_state + .setStencilTestEnable(true) + .setStencilFrontCompareOp(DkCompareOp_Always) + .setStencilFrontFailOp(DkStencilOp_Zero) + .setStencilFrontDepthFailOp(DkStencilOp_Zero) + .setStencilFrontPassOp(DkStencilOp_Zero); + m_dyn_cmd_buf.bindDepthStencilState(depth_stencil_state); + + /* Draw vertices. */ + for (int i = 0; i < npaths; i++) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleStrip, paths[i].strokeCount, 1, paths[i].strokeOffset, 0); + } + + /* Reset the depth stencil state to default. */ + m_dyn_cmd_buf.bindDepthStencilState(dk::DepthStencilState{}); + } else { + this->SetUniforms(ctx, call.uniformOffset, call.image); + + /* Draw vertices. */ + for (int i = 0; i < npaths; i++) { + m_dyn_cmd_buf.draw(DkPrimitive_TriangleStrip, paths[i].strokeCount, 1, paths[i].strokeOffset, 0); + } + } + } + + void DkRenderer::DrawTriangles(const DKNVGcontext &ctx, const DKNVGcall &call) { + this->SetUniforms(ctx, call.uniformOffset, call.image); + m_dyn_cmd_buf.draw(DkPrimitive_Triangles, call.triangleCount, 1, call.triangleOffset, 0); + } + + int DkRenderer::Create(DKNVGcontext &ctx) { + m_vertex_shader.load(m_code_mem_pool, "romfs:/shaders/fill_vsh.dksh"); + + /* Load the appropriate fragment shader depending on whether AA is enabled. */ + if (ctx.flags & NVG_ANTIALIAS) { + m_fragment_shader.load(m_code_mem_pool, "romfs:/shaders/fill_aa_fsh.dksh"); + } else { + m_fragment_shader.load(m_code_mem_pool, "romfs:/shaders/fill_fsh.dksh"); + } + + /* Set the size of fragment uniforms. 
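nanovg steps through the per-call uniform array in increments of ctx.fragSize, so this must match the FragmentUniformSize pushed in SetUniforms.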
*/ + ctx.fragSize = FragmentUniformSize; + return 1; + } + + std::shared_ptr<Texture> DkRenderer::FindTexture(int id) { + for (auto it = m_textures.begin(); it != m_textures.end(); it++) { + if ((*it)->GetId() == id) { + return *it; + } + } + + return nullptr; + } + + int DkRenderer::CreateTexture(const DKNVGcontext &ctx, int type, int w, int h, int image_flags, const unsigned char* data) { + const auto texture_id = m_next_texture_id++; + auto texture = std::make_shared<Texture>(texture_id); + texture->Initialize(m_image_mem_pool, m_data_mem_pool, m_device, m_queue, type, w, h, image_flags, data); + m_textures.push_back(texture); + return texture->GetId(); + } + + int DkRenderer::DeleteTexture(const DKNVGcontext &ctx, int image) { + bool found = false; + + for (auto it = m_textures.begin(); it != m_textures.end();) { + /* Remove textures with the given id. */ + if ((*it)->GetId() == image) { + it = m_textures.erase(it); + found = true; + } else { + ++it; + } + } + + /* Free any used image descriptors. */ + this->FreeImageDescriptor(image); + return found; + } + + int DkRenderer::UpdateTexture(const DKNVGcontext &ctx, int image, int x, int y, int w, int h, const unsigned char *data) { + const std::shared_ptr<Texture> texture = this->FindTexture(image); + + /* Could not find a texture. */ + if (texture == nullptr) { + return 0; + } + + const DKNVGtextureDescriptor &tex_desc = texture->GetDescriptor(); + if (tex_desc.type == NVG_TEXTURE_RGBA) { + data += y * tex_desc.width*4; + } else { + data += y * tex_desc.width; + } + x = 0; + w = tex_desc.width; + + UpdateImage(texture->GetImage(), m_data_mem_pool, m_device, m_queue, tex_desc.type, x, y, w, h, data); + return 1; + } + + int DkRenderer::GetTextureSize(const DKNVGcontext &ctx, int image, int *w, int *h) { + const auto descriptor = this->GetTextureDescriptor(ctx, image); + if (descriptor == nullptr) { + return 0; + } + + *w = descriptor->width; + *h = descriptor->height; + return 1; + } + + const DKNVGtextureDescriptor *DkRenderer::GetTextureDescriptor(const DKNVGcontext &ctx, int id) { + for (auto it = m_textures.begin(); it != m_textures.end(); it++) { + if ((*it)->GetId() == id) { + return &(*it)->GetDescriptor(); + } + } + + return nullptr; + } + + void DkRenderer::Flush(DKNVGcontext &ctx) { + if (ctx.ncalls > 0) { + /* Prepare dynamic command buffer. */ + m_dyn_cmd_mem.begin(m_dyn_cmd_buf); + + /* Update buffers with data. */ + this->UpdateVertexBuffer(ctx.verts, ctx.nverts * sizeof(NVGvertex)); + + /* Enable blending. */ + m_dyn_cmd_buf.bindColorState(dk::ColorState{}.setBlendEnable(0, true)); + + /* Setup. */ + m_dyn_cmd_buf.bindShaders(DkStageFlag_GraphicsMask, { m_vertex_shader, m_fragment_shader }); + m_dyn_cmd_buf.bindVtxAttribState(VertexAttribState); + m_dyn_cmd_buf.bindVtxBufferState(VertexBufferState); + m_dyn_cmd_buf.bindVtxBuffer(0, m_vertex_buffer->getGpuAddr(), m_vertex_buffer->getSize()); + + /* Push the view size to the uniform buffer and bind it. */ + const auto view = View{glm::vec2{m_view_width, m_view_height}}; + m_dyn_cmd_buf.pushConstants(m_view_uniform_buffer.getGpuAddr(), m_view_uniform_buffer.getSize(), 0, sizeof(view), &view); + m_dyn_cmd_buf.bindUniformBuffer(DkStage_Vertex, 0, m_view_uniform_buffer.getGpuAddr(), m_view_uniform_buffer.getSize()); + + /* Iterate over calls. */ + for (int i = 0; i < ctx.ncalls; i++) { + const DKNVGcall &call = ctx.calls[i]; + + /* Perform blending. 
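The nanovg blend factors recorded for this call are converted to deko3d blend factors and bound before dispatching to the appropriate draw routine.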
*/ + m_dyn_cmd_buf.bindBlendStates(0, { dk::BlendState{}.setFactors(static_cast<DkBlendFactor>(call.blendFunc.srcRGB), static_cast<DkBlendFactor>(call.blendFunc.dstRGB), static_cast<DkBlendFactor>(call.blendFunc.srcAlpha), static_cast<DkBlendFactor>(call.blendFunc.dstAlpha)) }); + + if (call.type == DKNVG_FILL) { + this->DrawFill(ctx, call); + } else if (call.type == DKNVG_CONVEXFILL) { + this->DrawConvexFill(ctx, call); + } else if (call.type == DKNVG_STROKE) { + this->DrawStroke(ctx, call); + } else if (call.type == DKNVG_TRIANGLES) { + this->DrawTriangles(ctx, call); + } + } + + m_queue.submitCommands(m_dyn_cmd_mem.end(m_dyn_cmd_buf)); + } + + /* Reset calls. */ + ctx.nverts = 0; + ctx.npaths = 0; + ctx.ncalls = 0; + ctx.nuniforms = 0; + } + +} \ No newline at end of file diff --git a/troposphere/daybreak/nanovg/source/framework/CApplication.cpp b/troposphere/daybreak/nanovg/source/framework/CApplication.cpp new file mode 100644 index 000000000..34786a3d8 --- /dev/null +++ b/troposphere/daybreak/nanovg/source/framework/CApplication.cpp @@ -0,0 +1,69 @@ +/* +** Sample Framework for deko3d Applications +** CApplication.cpp: Wrapper class containing common application boilerplate +*/ +#include "CApplication.h" + +CApplication::CApplication() +{ + appletLockExit(); + appletSetFocusHandlingMode(AppletFocusHandlingMode_NoSuspend); +} + +CApplication::~CApplication() +{ + appletSetFocusHandlingMode(AppletFocusHandlingMode_SuspendHomeSleep); + appletUnlockExit(); +} + +void CApplication::run() +{ + u64 tick_ref = armGetSystemTick(); + u64 tick_saved = tick_ref; + bool focused = appletGetFocusState() == AppletFocusState_Focused; + + onOperationMode(appletGetOperationMode()); + + for (;;) + { + u32 msg = 0; + Result rc = appletGetMessage(&msg); + if (R_SUCCEEDED(rc)) + { + bool should_close = !appletProcessMessage(msg); + if (should_close) + return; + + switch (msg) + { + case AppletMessage_FocusStateChanged: + { + bool old_focused = focused; + AppletFocusState state = appletGetFocusState(); + focused = state == AppletFocusState_Focused; + + onFocusState(state); + if (focused == old_focused) + break; + if (focused) + { + appletSetFocusHandlingMode(AppletFocusHandlingMode_NoSuspend); + tick_ref += armGetSystemTick() - tick_saved; + } + else + { + tick_saved = armGetSystemTick(); + appletSetFocusHandlingMode(AppletFocusHandlingMode_SuspendHomeSleepNotify); + } + break; + } + case AppletMessage_OperationModeChanged: + onOperationMode(appletGetOperationMode()); + break; + } + } + + if (focused && !onFrame(armTicksToNs(armGetSystemTick() - tick_ref))) + break; + } +} diff --git a/troposphere/daybreak/nanovg/source/framework/CExternalImage.cpp b/troposphere/daybreak/nanovg/source/framework/CExternalImage.cpp new file mode 100644 index 000000000..37b6a26b0 --- /dev/null +++ b/troposphere/daybreak/nanovg/source/framework/CExternalImage.cpp @@ -0,0 +1,37 @@ +/* +** Sample Framework for deko3d Applications +** CExternalImage.cpp: Utility class for loading images from the filesystem +*/ +#include "CExternalImage.h" +#include "FileLoader.h" + +bool CExternalImage::load(CMemPool& imagePool, CMemPool& scratchPool, dk::Device device, dk::Queue transferQueue, const char* path, uint32_t width, uint32_t height, DkImageFormat format, uint32_t flags) +{ + CMemPool::Handle tempimgmem = LoadFile(scratchPool, path, DK_IMAGE_LINEAR_STRIDE_ALIGNMENT); + if (!tempimgmem) + return false; + + dk::UniqueCmdBuf tempcmdbuf = dk::CmdBufMaker{device}.create(); + CMemPool::Handle tempcmdmem = scratchPool.allocate(DK_MEMBLOCK_ALIGNMENT); + tempcmdbuf.addMemory(tempcmdmem.getMemBlock(), 
tempcmdmem.getOffset(), tempcmdmem.getSize()); + + dk::ImageLayout layout; + dk::ImageLayoutMaker{device} + .setFlags(flags) + .setFormat(format) + .setDimensions(width, height) + .initialize(layout); + + m_mem = imagePool.allocate(layout.getSize(), layout.getAlignment()); + m_image.initialize(layout, m_mem.getMemBlock(), m_mem.getOffset()); + m_descriptor.initialize(m_image); + + dk::ImageView imageView{m_image}; + tempcmdbuf.copyBufferToImage({ tempimgmem.getGpuAddr() }, imageView, { 0, 0, 0, width, height, 1 }); + transferQueue.submitCommands(tempcmdbuf.finishList()); + transferQueue.waitIdle(); + + tempcmdmem.destroy(); + tempimgmem.destroy(); + return true; +} diff --git a/troposphere/daybreak/nanovg/source/framework/CIntrusiveTree.cpp b/troposphere/daybreak/nanovg/source/framework/CIntrusiveTree.cpp new file mode 100644 index 000000000..9f21b63db --- /dev/null +++ b/troposphere/daybreak/nanovg/source/framework/CIntrusiveTree.cpp @@ -0,0 +1,214 @@ +/* +** Sample Framework for deko3d Applications +** CIntrusiveTree.cpp: Intrusive red-black tree helper class +*/ +#include "CIntrusiveTree.h" + +// This red-black tree implementation is mostly based on mtheall's work, +// which can be found here: +// https://github.com/smealum/ctrulib/tree/master/libctru/source/util/rbtree + +void CIntrusiveTreeBase::rotate(N* node, N::Leaf leaf) +{ + N *tmp = node->child(leaf); + N *parent = node->getParent(); + + node->child(leaf) = tmp->child(!leaf); + if (tmp->child(!leaf)) + tmp->child(!leaf)->setParent(node); + + tmp->child(!leaf) = node; + tmp->setParent(parent); + + if (parent) + { + if (node == parent->child(!leaf)) + parent->child(!leaf) = tmp; + else + parent->child(leaf) = tmp; + } + else + m_root = tmp; + + node->setParent(tmp); +} + +void CIntrusiveTreeBase::recolor(N* parent, N* node) +{ + N *sibling; + + while ((!node || node->isBlack()) && node != m_root) + { + N::Leaf leaf = node == parent->left() ? N::Right : N::Left; + sibling = parent->child(leaf); + + if (sibling->isRed()) + { + sibling->setBlack(); + parent->setRed(); + rotate(parent, leaf); + sibling = parent->child(leaf); + } + + N::Color clr[2]; + clr[N::Left] = sibling->left() ? sibling->left()->getColor() : N::Black; + clr[N::Right] = sibling->right() ? sibling->right()->getColor() : N::Black; + + if (clr[N::Left] == N::Black && clr[N::Right] == N::Black) + { + sibling->setRed(); + node = parent; + parent = node->getParent(); + } + else + { + if (clr[leaf] == N::Black) + { + sibling->child(!leaf)->setBlack(); + sibling->setRed(); + rotate(sibling, !leaf); + sibling = parent->child(leaf); + } + + sibling->setColor(parent->getColor()); + parent->setBlack(); + sibling->child(leaf)->setBlack(); + rotate(parent, leaf); + + node = m_root; + } + } + + if (node) + node->setBlack(); +} + +auto CIntrusiveTreeBase::walk(N* node, N::Leaf leaf) const -> N* +{ + if (node->child(leaf)) + { + node = node->child(leaf); + while (node->child(!leaf)) + node = node->child(!leaf); + } + else + { + N *parent = node->getParent(); + while (parent && node == parent->child(leaf)) + { + node = parent; + parent = node->getParent(); + } + node = parent; + } + + return node; +} + +void CIntrusiveTreeBase::insert(N* node, N* parent) +{ + node->left() = node->right() = nullptr; + node->setParent(parent); + node->setRed(); + + while ((parent = node->getParent()) && parent->isRed()) + { + N *grandparent = parent->getParent(); + N::Leaf leaf = parent == grandparent->left() ? 
N::Right : N::Left; + N *uncle = grandparent->child(leaf); + + if (uncle && uncle->isRed()) + { + uncle->setBlack(); + parent->setBlack(); + grandparent->setRed(); + + node = grandparent; + } + else + { + if (parent->child(leaf) == node) + { + rotate(parent, leaf); + + N* tmp = parent; + parent = node; + node = tmp; + } + + parent->setBlack(); + grandparent->setRed(); + rotate(grandparent, !leaf); + } + } + + m_root->setBlack(); +} + +void CIntrusiveTreeBase::remove(N* node) +{ + N::Color color; + N *child, *parent; + + if (node->left() && node->right()) + { + N *old = node; + + node = node->right(); + while (node->left()) + node = node->left(); + + parent = old->getParent(); + if (parent) + { + if (parent->left() == old) + parent->left() = node; + else + parent->right() = node; + } + else + m_root = node; + + child = node->right(); + parent = node->getParent(); + color = node->getColor(); + + if (parent == old) + parent = node; + else + { + if (child) + child->setParent(parent); + parent->left() = child; + + node->right() = old->right(); + old->right()->setParent(node); + } + + node->setParent(old->getParent()); + node->setColor(old->getColor()); + node->left() = old->left(); + old->left()->setParent(node); + } + else + { + child = node->left() ? node->right() : node->left(); + parent = node->getParent(); + color = node->getColor(); + + if (child) + child->setParent(parent); + if (parent) + { + if (parent->left() == node) + parent->left() = child; + else + parent->right() = child; + } + else + m_root = child; + } + + if (color == N::Black) + recolor(parent, child); +} diff --git a/troposphere/daybreak/nanovg/source/framework/CMemPool.cpp b/troposphere/daybreak/nanovg/source/framework/CMemPool.cpp new file mode 100644 index 000000000..fb3bd10ce --- /dev/null +++ b/troposphere/daybreak/nanovg/source/framework/CMemPool.cpp @@ -0,0 +1,175 @@ +/* +** Sample Framework for deko3d Applications +** CMemPool.cpp: Pooled dynamic memory allocation manager class +*/ +#include "CMemPool.h" + +inline auto CMemPool::_newSlice() -> Slice* +{ + Slice* ret = m_sliceHeap.pop(); + if (!ret) ret = (Slice*)::malloc(sizeof(Slice)); + return ret; +} + +inline void CMemPool::_deleteSlice(Slice* s) +{ + if (!s) return; + m_sliceHeap.add(s); +} + +CMemPool::~CMemPool() +{ + m_memMap.iterate([](Slice* s) { ::free(s); }); + m_sliceHeap.iterate([](Slice* s) { ::free(s); }); + m_blocks.iterate([](Block* blk) { + blk->m_obj.destroy(); + ::free(blk); + }); +} + +auto CMemPool::allocate(uint32_t size, uint32_t alignment) -> Handle +{ + if (!size) return nullptr; + if (alignment & (alignment - 1)) return nullptr; + size = (size + alignment - 1) &~ (alignment - 1); +#ifdef DEBUG_CMEMPOOL + printf("Allocating size=%u alignment=0x%x\n", size, alignment); + { + Slice* temp = /*m_freeList*/m_memMap.first(); + while (temp) + { + printf("-- blk %p | 0x%08x-0x%08x | %s used\n", temp->m_block, temp->m_start, temp->m_end, temp->m_pool ? 
" " : "not"); + temp = /*m_freeList*/m_memMap.next(temp); + } + } +#endif + + uint32_t start_offset = 0; + uint32_t end_offset = 0; + Slice* slice = m_freeList.find(size, decltype(m_freeList)::LowerBound); + while (slice) + { +#ifdef DEBUG_CMEMPOOL + printf(" * Checking slice 0x%x - 0x%x\n", slice->m_start, slice->m_end); +#endif + start_offset = (slice->m_start + alignment - 1) &~ (alignment - 1); + end_offset = start_offset + size; + if (end_offset <= slice->m_end) + break; + slice = m_freeList.next(slice); + } + + if (!slice) + { + Block* blk = (Block*)::malloc(sizeof(Block)); + if (!blk) + return nullptr; + + uint32_t unusableSize = (m_flags & DkMemBlockFlags_Code) ? DK_SHADER_CODE_UNUSABLE_SIZE : 0; + uint32_t blkSize = m_blockSize - unusableSize; + blkSize = size > blkSize ? size : blkSize; + blkSize = (blkSize + unusableSize + DK_MEMBLOCK_ALIGNMENT - 1) &~ (DK_MEMBLOCK_ALIGNMENT - 1); +#ifdef DEBUG_CMEMPOOL + printf(" ! Allocating block of size 0x%x\n", blkSize); +#endif + blk->m_obj = dk::MemBlockMaker{m_dev, blkSize}.setFlags(m_flags).create(); + if (!blk->m_obj) + { + ::free(blk); + return nullptr; + } + + slice = _newSlice(); + if (!slice) + { + blk->m_obj.destroy(); + ::free(blk); + return nullptr; + } + + slice->m_pool = nullptr; + slice->m_block = blk; + slice->m_start = 0; + slice->m_end = blkSize - unusableSize; + m_memMap.add(slice); + + blk->m_cpuAddr = blk->m_obj.getCpuAddr(); + blk->m_gpuAddr = blk->m_obj.getGpuAddr(); + m_blocks.add(blk); + + start_offset = 0; + end_offset = size; + } + else + { +#ifdef DEBUG_CMEMPOOL + printf(" * found it\n"); +#endif + m_freeList.remove(slice); + } + + if (start_offset != slice->m_start) + { + Slice* t = _newSlice(); + if (!t) goto _bad; + t->m_pool = nullptr; + t->m_block = slice->m_block; + t->m_start = slice->m_start; + t->m_end = start_offset; +#ifdef DEBUG_CMEMPOOL + printf("-> subdivide left: %08x-%08x\n", t->m_start, t->m_end); +#endif + m_memMap.addBefore(slice, t); + m_freeList.insert(t, true); + slice->m_start = start_offset; + } + + if (end_offset != slice->m_end) + { + Slice* t = _newSlice(); + if (!t) goto _bad; + t->m_pool = nullptr; + t->m_block = slice->m_block; + t->m_start = end_offset; + t->m_end = slice->m_end; +#ifdef DEBUG_CMEMPOOL + printf("-> subdivide right: %08x-%08x\n", t->m_start, t->m_end); +#endif + m_memMap.addAfter(slice, t); + m_freeList.insert(t, true); + slice->m_end = end_offset; + } + + slice->m_pool = this; + return slice; + +_bad: + m_freeList.insert(slice, true); + return nullptr; +} + +void CMemPool::_destroy(Slice* slice) +{ + slice->m_pool = nullptr; + + Slice* left = m_memMap.prev(slice); + Slice* right = m_memMap.next(slice); + + if (left && left->canCoalesce(*slice)) + { + slice->m_start = left->m_start; + m_freeList.remove(left); + m_memMap.remove(left); + _deleteSlice(left); + } + + if (right && slice->canCoalesce(*right)) + { + slice->m_end = right->m_end; + m_freeList.remove(right); + m_memMap.remove(right); + _deleteSlice(right); + } + + m_freeList.insert(slice, true); +} diff --git a/troposphere/daybreak/nanovg/source/framework/CShader.cpp b/troposphere/daybreak/nanovg/source/framework/CShader.cpp new file mode 100644 index 000000000..6c5361c24 --- /dev/null +++ b/troposphere/daybreak/nanovg/source/framework/CShader.cpp @@ -0,0 +1,62 @@ +/* +** Sample Framework for deko3d Applications +** CShader.cpp: Utility class for loading shaders from the filesystem +*/ +#include "CShader.h" + +struct DkshHeader +{ + uint32_t magic; // DKSH_MAGIC + uint32_t header_sz; // sizeof(DkshHeader) + 
uint32_t control_sz; + uint32_t code_sz; + uint32_t programs_off; + uint32_t num_programs; +}; + +bool CShader::load(CMemPool& pool, const char* path) +{ + FILE* f; + DkshHeader hdr; + void* ctrlmem; + + m_codemem.destroy(); + + f = fopen(path, "rb"); + if (!f) return false; + + if (!fread(&hdr, sizeof(hdr), 1, f)) + goto _fail0; + + ctrlmem = malloc(hdr.control_sz); + if (!ctrlmem) + goto _fail0; + + rewind(f); + if (!fread(ctrlmem, hdr.control_sz, 1, f)) + goto _fail1; + + m_codemem = pool.allocate(hdr.code_sz, DK_SHADER_CODE_ALIGNMENT); + if (!m_codemem) + goto _fail1; + + if (!fread(m_codemem.getCpuAddr(), hdr.code_sz, 1, f)) + goto _fail2; + + dk::ShaderMaker{m_codemem.getMemBlock(), m_codemem.getOffset()} + .setControl(ctrlmem) + .setProgramId(0) + .initialize(m_shader); + + free(ctrlmem); + fclose(f); + return true; + +_fail2: + m_codemem.destroy(); +_fail1: + free(ctrlmem); +_fail0: + fclose(f); + return false; +} diff --git a/troposphere/daybreak/nanovg/source/framework/FileLoader.cpp b/troposphere/daybreak/nanovg/source/framework/FileLoader.cpp new file mode 100644 index 000000000..a9651bf95 --- /dev/null +++ b/troposphere/daybreak/nanovg/source/framework/FileLoader.cpp @@ -0,0 +1,27 @@ +/* +** Sample Framework for deko3d Applications +** FileLoader.cpp: Helpers for loading data from the filesystem directly into GPU memory +*/ +#include "FileLoader.h" + +CMemPool::Handle LoadFile(CMemPool& pool, const char* path, uint32_t alignment) +{ + FILE *f = fopen(path, "rb"); + if (!f) return nullptr; + + fseek(f, 0, SEEK_END); + uint32_t fsize = ftell(f); + rewind(f); + + CMemPool::Handle mem = pool.allocate(fsize, alignment); + if (!mem) + { + fclose(f); + return nullptr; + } + + fread(mem.getCpuAddr(), fsize, 1, f); + fclose(f); + + return mem; +} diff --git a/troposphere/daybreak/nanovg/source/framework/LICENSE b/troposphere/daybreak/nanovg/source/framework/LICENSE new file mode 100644 index 000000000..183debc3e --- /dev/null +++ b/troposphere/daybreak/nanovg/source/framework/LICENSE @@ -0,0 +1,18 @@ +Copyright (C) 2020 fincs + +This software is provided 'as-is', without any express or implied +warranty. In no event will the authors be held liable for any +damages arising from the use of this software. + +Permission is granted to anyone to use this software for any +purpose, including commercial applications, and to alter it and +redistribute it freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you + must not claim that you wrote the original software. If you use + this software in a product, an acknowledgment in the product + documentation would be appreciated but is not required. +2. Altered source versions must be plainly marked as such, and + must not be misrepresented as being the original software. +3. This notice may not be removed or altered from any source + distribution. diff --git a/troposphere/daybreak/nanovg/source/nanovg.c b/troposphere/daybreak/nanovg/source/nanovg.c new file mode 100644 index 000000000..aa863547b --- /dev/null +++ b/troposphere/daybreak/nanovg/source/nanovg.c @@ -0,0 +1,2926 @@ +// +// Copyright (c) 2013 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. 
+// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. +// + +#include +#include +#include +#include + +#include "nanovg.h" +#define FONTSTASH_IMPLEMENTATION +#include "fontstash.h" +#define STB_IMAGE_IMPLEMENTATION +#include "stb_image.h" + +#ifdef _MSC_VER +#pragma warning(disable: 4100) // unreferenced formal parameter +#pragma warning(disable: 4127) // conditional expression is constant +#pragma warning(disable: 4204) // nonstandard extension used : non-constant aggregate initializer +#pragma warning(disable: 4706) // assignment within conditional expression +#endif + +#define NVG_INIT_FONTIMAGE_SIZE 512 +#define NVG_MAX_FONTIMAGE_SIZE 2048 +#define NVG_MAX_FONTIMAGES 4 + +#define NVG_INIT_COMMANDS_SIZE 256 +#define NVG_INIT_POINTS_SIZE 128 +#define NVG_INIT_PATHS_SIZE 16 +#define NVG_INIT_VERTS_SIZE 256 +#define NVG_MAX_STATES 32 + +#define NVG_KAPPA90 0.5522847493f // Length proportional to radius of a cubic bezier handle for 90deg arcs. + +#define NVG_COUNTOF(arr) (sizeof(arr) / sizeof(0[arr])) + + +enum NVGcommands { + NVG_MOVETO = 0, + NVG_LINETO = 1, + NVG_BEZIERTO = 2, + NVG_CLOSE = 3, + NVG_WINDING = 4, +}; + +enum NVGpointFlags +{ + NVG_PT_CORNER = 0x01, + NVG_PT_LEFT = 0x02, + NVG_PT_BEVEL = 0x04, + NVG_PR_INNERBEVEL = 0x08, +}; + +struct NVGstate { + NVGcompositeOperationState compositeOperation; + int shapeAntiAlias; + NVGpaint fill; + NVGpaint stroke; + float strokeWidth; + float miterLimit; + int lineJoin; + int lineCap; + float alpha; + float xform[6]; + NVGscissor scissor; + float fontSize; + float letterSpacing; + float lineHeight; + float fontBlur; + int textAlign; + int fontId; +}; +typedef struct NVGstate NVGstate; + +struct NVGpoint { + float x,y; + float dx, dy; + float len; + float dmx, dmy; + unsigned char flags; +}; +typedef struct NVGpoint NVGpoint; + +struct NVGpathCache { + NVGpoint* points; + int npoints; + int cpoints; + NVGpath* paths; + int npaths; + int cpaths; + NVGvertex* verts; + int nverts; + int cverts; + float bounds[4]; +}; +typedef struct NVGpathCache NVGpathCache; + +struct NVGcontext { + NVGparams params; + float* commands; + int ccommands; + int ncommands; + float commandx, commandy; + NVGstate states[NVG_MAX_STATES]; + int nstates; + NVGpathCache* cache; + float tessTol; + float distTol; + float fringeWidth; + float devicePxRatio; + struct FONScontext* fs; + int fontImages[NVG_MAX_FONTIMAGES]; + int fontImageIdx; + int drawCallCount; + int fillTriCount; + int strokeTriCount; + int textTriCount; +}; + +static float nvg__sqrtf(float a) { return sqrtf(a); } +static float nvg__modf(float a, float b) { return fmodf(a, b); } +static float nvg__sinf(float a) { return sinf(a); } +static float nvg__cosf(float a) { return cosf(a); } +static float nvg__tanf(float a) { return tanf(a); } +static float nvg__atan2f(float a,float b) { return atan2f(a, b); } +static float nvg__acosf(float a) { return acosf(a); } + +static int nvg__mini(int a, int b) { 
return a < b ? a : b; } +static int nvg__maxi(int a, int b) { return a > b ? a : b; } +static int nvg__clampi(int a, int mn, int mx) { return a < mn ? mn : (a > mx ? mx : a); } +static float nvg__minf(float a, float b) { return a < b ? a : b; } +static float nvg__maxf(float a, float b) { return a > b ? a : b; } +static float nvg__absf(float a) { return a >= 0.0f ? a : -a; } +static float nvg__signf(float a) { return a >= 0.0f ? 1.0f : -1.0f; } +static float nvg__clampf(float a, float mn, float mx) { return a < mn ? mn : (a > mx ? mx : a); } +static float nvg__cross(float dx0, float dy0, float dx1, float dy1) { return dx1*dy0 - dx0*dy1; } + +static float nvg__normalize(float *x, float* y) +{ + float d = nvg__sqrtf((*x)*(*x) + (*y)*(*y)); + if (d > 1e-6f) { + float id = 1.0f / d; + *x *= id; + *y *= id; + } + return d; +} + + +static void nvg__deletePathCache(NVGpathCache* c) +{ + if (c == NULL) return; + if (c->points != NULL) free(c->points); + if (c->paths != NULL) free(c->paths); + if (c->verts != NULL) free(c->verts); + free(c); +} + +static NVGpathCache* nvg__allocPathCache(void) +{ + NVGpathCache* c = (NVGpathCache*)malloc(sizeof(NVGpathCache)); + if (c == NULL) goto error; + memset(c, 0, sizeof(NVGpathCache)); + + c->points = (NVGpoint*)malloc(sizeof(NVGpoint)*NVG_INIT_POINTS_SIZE); + if (!c->points) goto error; + c->npoints = 0; + c->cpoints = NVG_INIT_POINTS_SIZE; + + c->paths = (NVGpath*)malloc(sizeof(NVGpath)*NVG_INIT_PATHS_SIZE); + if (!c->paths) goto error; + c->npaths = 0; + c->cpaths = NVG_INIT_PATHS_SIZE; + + c->verts = (NVGvertex*)malloc(sizeof(NVGvertex)*NVG_INIT_VERTS_SIZE); + if (!c->verts) goto error; + c->nverts = 0; + c->cverts = NVG_INIT_VERTS_SIZE; + + return c; +error: + nvg__deletePathCache(c); + return NULL; +} + +static void nvg__setDevicePixelRatio(NVGcontext* ctx, float ratio) +{ + ctx->tessTol = 0.25f / ratio; + ctx->distTol = 0.01f / ratio; + ctx->fringeWidth = 1.0f / ratio; + ctx->devicePxRatio = ratio; +} + +static NVGcompositeOperationState nvg__compositeOperationState(int op) +{ + int sfactor, dfactor; + + if (op == NVG_SOURCE_OVER) + { + sfactor = NVG_ONE; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else if (op == NVG_SOURCE_IN) + { + sfactor = NVG_DST_ALPHA; + dfactor = NVG_ZERO; + } + else if (op == NVG_SOURCE_OUT) + { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_ZERO; + } + else if (op == NVG_ATOP) + { + sfactor = NVG_DST_ALPHA; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else if (op == NVG_DESTINATION_OVER) + { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_ONE; + } + else if (op == NVG_DESTINATION_IN) + { + sfactor = NVG_ZERO; + dfactor = NVG_SRC_ALPHA; + } + else if (op == NVG_DESTINATION_OUT) + { + sfactor = NVG_ZERO; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else if (op == NVG_DESTINATION_ATOP) + { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_SRC_ALPHA; + } + else if (op == NVG_LIGHTER) + { + sfactor = NVG_ONE; + dfactor = NVG_ONE; + } + else if (op == NVG_COPY) + { + sfactor = NVG_ONE; + dfactor = NVG_ZERO; + } + else if (op == NVG_XOR) + { + sfactor = NVG_ONE_MINUS_DST_ALPHA; + dfactor = NVG_ONE_MINUS_SRC_ALPHA; + } + else + { + sfactor = NVG_ONE; + dfactor = NVG_ZERO; + } + + NVGcompositeOperationState state; + state.srcRGB = sfactor; + state.dstRGB = dfactor; + state.srcAlpha = sfactor; + state.dstAlpha = dfactor; + return state; +} + +static NVGstate* nvg__getState(NVGcontext* ctx) +{ + return &ctx->states[ctx->nstates-1]; +} + +NVGcontext* nvgCreateInternal(NVGparams* params) +{ + FONSparams fontParams; + NVGcontext* 
ctx = (NVGcontext*)malloc(sizeof(NVGcontext)); + int i; + if (ctx == NULL) goto error; + memset(ctx, 0, sizeof(NVGcontext)); + + ctx->params = *params; + for (i = 0; i < NVG_MAX_FONTIMAGES; i++) + ctx->fontImages[i] = 0; + + ctx->commands = (float*)malloc(sizeof(float)*NVG_INIT_COMMANDS_SIZE); + if (!ctx->commands) goto error; + ctx->ncommands = 0; + ctx->ccommands = NVG_INIT_COMMANDS_SIZE; + + ctx->cache = nvg__allocPathCache(); + if (ctx->cache == NULL) goto error; + + nvgSave(ctx); + nvgReset(ctx); + + nvg__setDevicePixelRatio(ctx, 1.0f); + + if (ctx->params.renderCreate(ctx->params.userPtr) == 0) goto error; + + // Init font rendering + memset(&fontParams, 0, sizeof(fontParams)); + fontParams.width = NVG_INIT_FONTIMAGE_SIZE; + fontParams.height = NVG_INIT_FONTIMAGE_SIZE; + fontParams.flags = FONS_ZERO_TOPLEFT; + fontParams.renderCreate = NULL; + fontParams.renderUpdate = NULL; + fontParams.renderDraw = NULL; + fontParams.renderDelete = NULL; + fontParams.userPtr = NULL; + ctx->fs = fonsCreateInternal(&fontParams); + if (ctx->fs == NULL) goto error; + + // Create font texture + ctx->fontImages[0] = ctx->params.renderCreateTexture(ctx->params.userPtr, NVG_TEXTURE_ALPHA, fontParams.width, fontParams.height, 0, NULL); + if (ctx->fontImages[0] == 0) goto error; + ctx->fontImageIdx = 0; + + return ctx; + +error: + nvgDeleteInternal(ctx); + return 0; +} + +NVGparams* nvgInternalParams(NVGcontext* ctx) +{ + return &ctx->params; +} + +void nvgDeleteInternal(NVGcontext* ctx) +{ + int i; + if (ctx == NULL) return; + if (ctx->commands != NULL) free(ctx->commands); + if (ctx->cache != NULL) nvg__deletePathCache(ctx->cache); + + if (ctx->fs) + fonsDeleteInternal(ctx->fs); + + for (i = 0; i < NVG_MAX_FONTIMAGES; i++) { + if (ctx->fontImages[i] != 0) { + nvgDeleteImage(ctx, ctx->fontImages[i]); + ctx->fontImages[i] = 0; + } + } + + if (ctx->params.renderDelete != NULL) + ctx->params.renderDelete(ctx->params.userPtr); + + free(ctx); +} + +void nvgBeginFrame(NVGcontext* ctx, float windowWidth, float windowHeight, float devicePixelRatio) +{ +/* printf("Tris: draws:%d fill:%d stroke:%d text:%d TOT:%d\n", + ctx->drawCallCount, ctx->fillTriCount, ctx->strokeTriCount, ctx->textTriCount, + ctx->fillTriCount+ctx->strokeTriCount+ctx->textTriCount);*/ + + ctx->nstates = 0; + nvgSave(ctx); + nvgReset(ctx); + + nvg__setDevicePixelRatio(ctx, devicePixelRatio); + + ctx->params.renderViewport(ctx->params.userPtr, windowWidth, windowHeight, devicePixelRatio); + + ctx->drawCallCount = 0; + ctx->fillTriCount = 0; + ctx->strokeTriCount = 0; + ctx->textTriCount = 0; +} + +void nvgCancelFrame(NVGcontext* ctx) +{ + ctx->params.renderCancel(ctx->params.userPtr); +} + +void nvgEndFrame(NVGcontext* ctx) +{ + ctx->params.renderFlush(ctx->params.userPtr); + if (ctx->fontImageIdx != 0) { + int fontImage = ctx->fontImages[ctx->fontImageIdx]; + int i, j, iw, ih; + // delete images that smaller than current one + if (fontImage == 0) + return; + nvgImageSize(ctx, fontImage, &iw, &ih); + for (i = j = 0; i < ctx->fontImageIdx; i++) { + if (ctx->fontImages[i] != 0) { + int nw, nh; + nvgImageSize(ctx, ctx->fontImages[i], &nw, &nh); + if (nw < iw || nh < ih) + nvgDeleteImage(ctx, ctx->fontImages[i]); + else + ctx->fontImages[j++] = ctx->fontImages[i]; + } + } + // make current font image to first + ctx->fontImages[j++] = ctx->fontImages[0]; + ctx->fontImages[0] = fontImage; + ctx->fontImageIdx = 0; + // clear all images after j + for (i = j; i < NVG_MAX_FONTIMAGES; i++) + ctx->fontImages[i] = 0; + } +} + +NVGcolor nvgRGB(unsigned char r, 
unsigned char g, unsigned char b) +{ + return nvgRGBA(r,g,b,255); +} + +NVGcolor nvgRGBf(float r, float g, float b) +{ + return nvgRGBAf(r,g,b,1.0f); +} + +NVGcolor nvgRGBA(unsigned char r, unsigned char g, unsigned char b, unsigned char a) +{ + NVGcolor color; + // Use longer initialization to suppress warning. + color.r = r / 255.0f; + color.g = g / 255.0f; + color.b = b / 255.0f; + color.a = a / 255.0f; + return color; +} + +NVGcolor nvgRGBAf(float r, float g, float b, float a) +{ + NVGcolor color; + // Use longer initialization to suppress warning. + color.r = r; + color.g = g; + color.b = b; + color.a = a; + return color; +} + +NVGcolor nvgTransRGBA(NVGcolor c, unsigned char a) +{ + c.a = a / 255.0f; + return c; +} + +NVGcolor nvgTransRGBAf(NVGcolor c, float a) +{ + c.a = a; + return c; +} + +NVGcolor nvgLerpRGBA(NVGcolor c0, NVGcolor c1, float u) +{ + int i; + float oneminu; + NVGcolor cint = {{{0}}}; + + u = nvg__clampf(u, 0.0f, 1.0f); + oneminu = 1.0f - u; + for( i = 0; i <4; i++ ) + { + cint.rgba[i] = c0.rgba[i] * oneminu + c1.rgba[i] * u; + } + + return cint; +} + +NVGcolor nvgHSL(float h, float s, float l) +{ + return nvgHSLA(h,s,l,255); +} + +static float nvg__hue(float h, float m1, float m2) +{ + if (h < 0) h += 1; + if (h > 1) h -= 1; + if (h < 1.0f/6.0f) + return m1 + (m2 - m1) * h * 6.0f; + else if (h < 3.0f/6.0f) + return m2; + else if (h < 4.0f/6.0f) + return m1 + (m2 - m1) * (2.0f/3.0f - h) * 6.0f; + return m1; +} + +NVGcolor nvgHSLA(float h, float s, float l, unsigned char a) +{ + float m1, m2; + NVGcolor col; + h = nvg__modf(h, 1.0f); + if (h < 0.0f) h += 1.0f; + s = nvg__clampf(s, 0.0f, 1.0f); + l = nvg__clampf(l, 0.0f, 1.0f); + m2 = l <= 0.5f ? (l * (1 + s)) : (l + s - l * s); + m1 = 2 * l - m2; + col.r = nvg__clampf(nvg__hue(h + 1.0f/3.0f, m1, m2), 0.0f, 1.0f); + col.g = nvg__clampf(nvg__hue(h, m1, m2), 0.0f, 1.0f); + col.b = nvg__clampf(nvg__hue(h - 1.0f/3.0f, m1, m2), 0.0f, 1.0f); + col.a = a/255.0f; + return col; +} + +void nvgTransformIdentity(float* t) +{ + t[0] = 1.0f; t[1] = 0.0f; + t[2] = 0.0f; t[3] = 1.0f; + t[4] = 0.0f; t[5] = 0.0f; +} + +void nvgTransformTranslate(float* t, float tx, float ty) +{ + t[0] = 1.0f; t[1] = 0.0f; + t[2] = 0.0f; t[3] = 1.0f; + t[4] = tx; t[5] = ty; +} + +void nvgTransformScale(float* t, float sx, float sy) +{ + t[0] = sx; t[1] = 0.0f; + t[2] = 0.0f; t[3] = sy; + t[4] = 0.0f; t[5] = 0.0f; +} + +void nvgTransformRotate(float* t, float a) +{ + float cs = nvg__cosf(a), sn = nvg__sinf(a); + t[0] = cs; t[1] = sn; + t[2] = -sn; t[3] = cs; + t[4] = 0.0f; t[5] = 0.0f; +} + +void nvgTransformSkewX(float* t, float a) +{ + t[0] = 1.0f; t[1] = 0.0f; + t[2] = nvg__tanf(a); t[3] = 1.0f; + t[4] = 0.0f; t[5] = 0.0f; +} + +void nvgTransformSkewY(float* t, float a) +{ + t[0] = 1.0f; t[1] = nvg__tanf(a); + t[2] = 0.0f; t[3] = 1.0f; + t[4] = 0.0f; t[5] = 0.0f; +} + +void nvgTransformMultiply(float* t, const float* s) +{ + float t0 = t[0] * s[0] + t[1] * s[2]; + float t2 = t[2] * s[0] + t[3] * s[2]; + float t4 = t[4] * s[0] + t[5] * s[2] + s[4]; + t[1] = t[0] * s[1] + t[1] * s[3]; + t[3] = t[2] * s[1] + t[3] * s[3]; + t[5] = t[4] * s[1] + t[5] * s[3] + s[5]; + t[0] = t0; + t[2] = t2; + t[4] = t4; +} + +void nvgTransformPremultiply(float* t, const float* s) +{ + float s2[6]; + memcpy(s2, s, sizeof(float)*6); + nvgTransformMultiply(s2, t); + memcpy(t, s2, sizeof(float)*6); +} + +int nvgTransformInverse(float* inv, const float* t) +{ + double invdet, det = (double)t[0] * t[3] - (double)t[2] * t[1]; + if (det > -1e-6 && det < 1e-6) { + 
nvgTransformIdentity(inv); + return 0; + } + invdet = 1.0 / det; + inv[0] = (float)(t[3] * invdet); + inv[2] = (float)(-t[2] * invdet); + inv[4] = (float)(((double)t[2] * t[5] - (double)t[3] * t[4]) * invdet); + inv[1] = (float)(-t[1] * invdet); + inv[3] = (float)(t[0] * invdet); + inv[5] = (float)(((double)t[1] * t[4] - (double)t[0] * t[5]) * invdet); + return 1; +} + +void nvgTransformPoint(float* dx, float* dy, const float* t, float sx, float sy) +{ + *dx = sx*t[0] + sy*t[2] + t[4]; + *dy = sx*t[1] + sy*t[3] + t[5]; +} + +float nvgDegToRad(float deg) +{ + return deg / 180.0f * NVG_PI; +} + +float nvgRadToDeg(float rad) +{ + return rad / NVG_PI * 180.0f; +} + +static void nvg__setPaintColor(NVGpaint* p, NVGcolor color) +{ + memset(p, 0, sizeof(*p)); + nvgTransformIdentity(p->xform); + p->radius = 0.0f; + p->feather = 1.0f; + p->innerColor = color; + p->outerColor = color; +} + + +// State handling +void nvgSave(NVGcontext* ctx) +{ + if (ctx->nstates >= NVG_MAX_STATES) + return; + if (ctx->nstates > 0) + memcpy(&ctx->states[ctx->nstates], &ctx->states[ctx->nstates-1], sizeof(NVGstate)); + ctx->nstates++; +} + +void nvgRestore(NVGcontext* ctx) +{ + if (ctx->nstates <= 1) + return; + ctx->nstates--; +} + +void nvgReset(NVGcontext* ctx) +{ + NVGstate* state = nvg__getState(ctx); + memset(state, 0, sizeof(*state)); + + nvg__setPaintColor(&state->fill, nvgRGBA(255,255,255,255)); + nvg__setPaintColor(&state->stroke, nvgRGBA(0,0,0,255)); + state->compositeOperation = nvg__compositeOperationState(NVG_SOURCE_OVER); + state->shapeAntiAlias = 1; + state->strokeWidth = 1.0f; + state->miterLimit = 10.0f; + state->lineCap = NVG_BUTT; + state->lineJoin = NVG_MITER; + state->alpha = 1.0f; + nvgTransformIdentity(state->xform); + + state->scissor.extent[0] = -1.0f; + state->scissor.extent[1] = -1.0f; + + state->fontSize = 16.0f; + state->letterSpacing = 0.0f; + state->lineHeight = 1.0f; + state->fontBlur = 0.0f; + state->textAlign = NVG_ALIGN_LEFT | NVG_ALIGN_BASELINE; + state->fontId = 0; +} + +// State setting +void nvgShapeAntiAlias(NVGcontext* ctx, int enabled) +{ + NVGstate* state = nvg__getState(ctx); + state->shapeAntiAlias = enabled; +} + +void nvgStrokeWidth(NVGcontext* ctx, float width) +{ + NVGstate* state = nvg__getState(ctx); + state->strokeWidth = width; +} + +void nvgMiterLimit(NVGcontext* ctx, float limit) +{ + NVGstate* state = nvg__getState(ctx); + state->miterLimit = limit; +} + +void nvgLineCap(NVGcontext* ctx, int cap) +{ + NVGstate* state = nvg__getState(ctx); + state->lineCap = cap; +} + +void nvgLineJoin(NVGcontext* ctx, int join) +{ + NVGstate* state = nvg__getState(ctx); + state->lineJoin = join; +} + +void nvgGlobalAlpha(NVGcontext* ctx, float alpha) +{ + NVGstate* state = nvg__getState(ctx); + state->alpha = alpha; +} + +void nvgTransform(NVGcontext* ctx, float a, float b, float c, float d, float e, float f) +{ + NVGstate* state = nvg__getState(ctx); + float t[6] = { a, b, c, d, e, f }; + nvgTransformPremultiply(state->xform, t); +} + +void nvgResetTransform(NVGcontext* ctx) +{ + NVGstate* state = nvg__getState(ctx); + nvgTransformIdentity(state->xform); +} + +void nvgTranslate(NVGcontext* ctx, float x, float y) +{ + NVGstate* state = nvg__getState(ctx); + float t[6]; + nvgTransformTranslate(t, x,y); + nvgTransformPremultiply(state->xform, t); +} + +void nvgRotate(NVGcontext* ctx, float angle) +{ + NVGstate* state = nvg__getState(ctx); + float t[6]; + nvgTransformRotate(t, angle); + nvgTransformPremultiply(state->xform, t); +} + +void nvgSkewX(NVGcontext* ctx, float angle) +{ + 
NVGstate* state = nvg__getState(ctx); + float t[6]; + nvgTransformSkewX(t, angle); + nvgTransformPremultiply(state->xform, t); +} + +void nvgSkewY(NVGcontext* ctx, float angle) +{ + NVGstate* state = nvg__getState(ctx); + float t[6]; + nvgTransformSkewY(t, angle); + nvgTransformPremultiply(state->xform, t); +} + +void nvgScale(NVGcontext* ctx, float x, float y) +{ + NVGstate* state = nvg__getState(ctx); + float t[6]; + nvgTransformScale(t, x,y); + nvgTransformPremultiply(state->xform, t); +} + +void nvgCurrentTransform(NVGcontext* ctx, float* xform) +{ + NVGstate* state = nvg__getState(ctx); + if (xform == NULL) return; + memcpy(xform, state->xform, sizeof(float)*6); +} + +void nvgStrokeColor(NVGcontext* ctx, NVGcolor color) +{ + NVGstate* state = nvg__getState(ctx); + nvg__setPaintColor(&state->stroke, color); +} + +void nvgStrokePaint(NVGcontext* ctx, NVGpaint paint) +{ + NVGstate* state = nvg__getState(ctx); + state->stroke = paint; + nvgTransformMultiply(state->stroke.xform, state->xform); +} + +void nvgFillColor(NVGcontext* ctx, NVGcolor color) +{ + NVGstate* state = nvg__getState(ctx); + nvg__setPaintColor(&state->fill, color); +} + +void nvgFillPaint(NVGcontext* ctx, NVGpaint paint) +{ + NVGstate* state = nvg__getState(ctx); + state->fill = paint; + nvgTransformMultiply(state->fill.xform, state->xform); +} + +int nvgCreateImage(NVGcontext* ctx, const char* filename, int imageFlags) +{ + int w, h, n, image; + unsigned char* img; + stbi_set_unpremultiply_on_load(1); + stbi_convert_iphone_png_to_rgb(1); + img = stbi_load(filename, &w, &h, &n, 4); + if (img == NULL) { +// printf("Failed to load %s - %s\n", filename, stbi_failure_reason()); + return 0; + } + image = nvgCreateImageRGBA(ctx, w, h, imageFlags, img); + stbi_image_free(img); + return image; +} + +int nvgCreateImageMem(NVGcontext* ctx, int imageFlags, unsigned char* data, int ndata) +{ + int w, h, n, image; + unsigned char* img = stbi_load_from_memory(data, ndata, &w, &h, &n, 4); + if (img == NULL) { +// printf("Failed to load %s - %s\n", filename, stbi_failure_reason()); + return 0; + } + image = nvgCreateImageRGBA(ctx, w, h, imageFlags, img); + stbi_image_free(img); + return image; +} + +int nvgCreateImageRGBA(NVGcontext* ctx, int w, int h, int imageFlags, const unsigned char* data) +{ + return ctx->params.renderCreateTexture(ctx->params.userPtr, NVG_TEXTURE_RGBA, w, h, imageFlags, data); +} + +void nvgUpdateImage(NVGcontext* ctx, int image, const unsigned char* data) +{ + int w, h; + ctx->params.renderGetTextureSize(ctx->params.userPtr, image, &w, &h); + ctx->params.renderUpdateTexture(ctx->params.userPtr, image, 0,0, w,h, data); +} + +void nvgImageSize(NVGcontext* ctx, int image, int* w, int* h) +{ + ctx->params.renderGetTextureSize(ctx->params.userPtr, image, w, h); +} + +void nvgDeleteImage(NVGcontext* ctx, int image) +{ + ctx->params.renderDeleteTexture(ctx->params.userPtr, image); +} + +NVGpaint nvgLinearGradient(NVGcontext* ctx, + float sx, float sy, float ex, float ey, + NVGcolor icol, NVGcolor ocol) +{ + NVGpaint p; + float dx, dy, d; + const float large = 1e5; + NVG_NOTUSED(ctx); + memset(&p, 0, sizeof(p)); + + // Calculate transform aligned to the line + dx = ex - sx; + dy = ey - sy; + d = sqrtf(dx*dx + dy*dy); + if (d > 0.0001f) { + dx /= d; + dy /= d; + } else { + dx = 0; + dy = 1; + } + + p.xform[0] = dy; p.xform[1] = -dx; + p.xform[2] = dx; p.xform[3] = dy; + p.xform[4] = sx - dx*large; p.xform[5] = sy - dy*large; + + p.extent[0] = large; + p.extent[1] = large + d*0.5f; + + p.radius = 0.0f; + + p.feather = 
nvg__maxf(1.0f, d); + + p.innerColor = icol; + p.outerColor = ocol; + + return p; +} + +NVGpaint nvgRadialGradient(NVGcontext* ctx, + float cx, float cy, float inr, float outr, + NVGcolor icol, NVGcolor ocol) +{ + NVGpaint p; + float r = (inr+outr)*0.5f; + float f = (outr-inr); + NVG_NOTUSED(ctx); + memset(&p, 0, sizeof(p)); + + nvgTransformIdentity(p.xform); + p.xform[4] = cx; + p.xform[5] = cy; + + p.extent[0] = r; + p.extent[1] = r; + + p.radius = r; + + p.feather = nvg__maxf(1.0f, f); + + p.innerColor = icol; + p.outerColor = ocol; + + return p; +} + +NVGpaint nvgBoxGradient(NVGcontext* ctx, + float x, float y, float w, float h, float r, float f, + NVGcolor icol, NVGcolor ocol) +{ + NVGpaint p; + NVG_NOTUSED(ctx); + memset(&p, 0, sizeof(p)); + + nvgTransformIdentity(p.xform); + p.xform[4] = x+w*0.5f; + p.xform[5] = y+h*0.5f; + + p.extent[0] = w*0.5f; + p.extent[1] = h*0.5f; + + p.radius = r; + + p.feather = nvg__maxf(1.0f, f); + + p.innerColor = icol; + p.outerColor = ocol; + + return p; +} + + +NVGpaint nvgImagePattern(NVGcontext* ctx, + float cx, float cy, float w, float h, float angle, + int image, float alpha) +{ + NVGpaint p; + NVG_NOTUSED(ctx); + memset(&p, 0, sizeof(p)); + + nvgTransformRotate(p.xform, angle); + p.xform[4] = cx; + p.xform[5] = cy; + + p.extent[0] = w; + p.extent[1] = h; + + p.image = image; + + p.innerColor = p.outerColor = nvgRGBAf(1,1,1,alpha); + + return p; +} + +// Scissoring +void nvgScissor(NVGcontext* ctx, float x, float y, float w, float h) +{ + NVGstate* state = nvg__getState(ctx); + + w = nvg__maxf(0.0f, w); + h = nvg__maxf(0.0f, h); + + nvgTransformIdentity(state->scissor.xform); + state->scissor.xform[4] = x+w*0.5f; + state->scissor.xform[5] = y+h*0.5f; + nvgTransformMultiply(state->scissor.xform, state->xform); + + state->scissor.extent[0] = w*0.5f; + state->scissor.extent[1] = h*0.5f; +} + +static void nvg__isectRects(float* dst, + float ax, float ay, float aw, float ah, + float bx, float by, float bw, float bh) +{ + float minx = nvg__maxf(ax, bx); + float miny = nvg__maxf(ay, by); + float maxx = nvg__minf(ax+aw, bx+bw); + float maxy = nvg__minf(ay+ah, by+bh); + dst[0] = minx; + dst[1] = miny; + dst[2] = nvg__maxf(0.0f, maxx - minx); + dst[3] = nvg__maxf(0.0f, maxy - miny); +} + +void nvgIntersectScissor(NVGcontext* ctx, float x, float y, float w, float h) +{ + NVGstate* state = nvg__getState(ctx); + float pxform[6], invxorm[6]; + float rect[4]; + float ex, ey, tex, tey; + + // If no previous scissor has been set, set the scissor as current scissor. + if (state->scissor.extent[0] < 0) { + nvgScissor(ctx, x, y, w, h); + return; + } + + // Transform the current scissor rect into current transform space. + // If there is difference in rotation, this will be approximation. + memcpy(pxform, state->scissor.xform, sizeof(float)*6); + ex = state->scissor.extent[0]; + ey = state->scissor.extent[1]; + nvgTransformInverse(invxorm, state->xform); + nvgTransformMultiply(pxform, invxorm); + tex = ex*nvg__absf(pxform[0]) + ey*nvg__absf(pxform[2]); + tey = ex*nvg__absf(pxform[1]) + ey*nvg__absf(pxform[3]); + + // Intersect rects. + nvg__isectRects(rect, pxform[4]-tex,pxform[5]-tey,tex*2,tey*2, x,y,w,h); + + nvgScissor(ctx, rect[0], rect[1], rect[2], rect[3]); +} + +void nvgResetScissor(NVGcontext* ctx) +{ + NVGstate* state = nvg__getState(ctx); + memset(state->scissor.xform, 0, sizeof(state->scissor.xform)); + state->scissor.extent[0] = -1.0f; + state->scissor.extent[1] = -1.0f; +} + +// Global composite operation. 
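These setters record the source/destination blend factors on the current state: nvgGlobalCompositeOperation maps a composite operation to factors via nvg__compositeOperationState(), while the BlendFunc variants assign the factors directly.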
+void nvgGlobalCompositeOperation(NVGcontext* ctx, int op) +{ + NVGstate* state = nvg__getState(ctx); + state->compositeOperation = nvg__compositeOperationState(op); +} + +void nvgGlobalCompositeBlendFunc(NVGcontext* ctx, int sfactor, int dfactor) +{ + nvgGlobalCompositeBlendFuncSeparate(ctx, sfactor, dfactor, sfactor, dfactor); +} + +void nvgGlobalCompositeBlendFuncSeparate(NVGcontext* ctx, int srcRGB, int dstRGB, int srcAlpha, int dstAlpha) +{ + NVGcompositeOperationState op; + op.srcRGB = srcRGB; + op.dstRGB = dstRGB; + op.srcAlpha = srcAlpha; + op.dstAlpha = dstAlpha; + + NVGstate* state = nvg__getState(ctx); + state->compositeOperation = op; +} + +static int nvg__ptEquals(float x1, float y1, float x2, float y2, float tol) +{ + float dx = x2 - x1; + float dy = y2 - y1; + return dx*dx + dy*dy < tol*tol; +} + +static float nvg__distPtSeg(float x, float y, float px, float py, float qx, float qy) +{ + float pqx, pqy, dx, dy, d, t; + pqx = qx-px; + pqy = qy-py; + dx = x-px; + dy = y-py; + d = pqx*pqx + pqy*pqy; + t = pqx*dx + pqy*dy; + if (d > 0) t /= d; + if (t < 0) t = 0; + else if (t > 1) t = 1; + dx = px + t*pqx - x; + dy = py + t*pqy - y; + return dx*dx + dy*dy; +} + +static void nvg__appendCommands(NVGcontext* ctx, float* vals, int nvals) +{ + NVGstate* state = nvg__getState(ctx); + int i; + + if (ctx->ncommands+nvals > ctx->ccommands) { + float* commands; + int ccommands = ctx->ncommands+nvals + ctx->ccommands/2; + commands = (float*)realloc(ctx->commands, sizeof(float)*ccommands); + if (commands == NULL) return; + ctx->commands = commands; + ctx->ccommands = ccommands; + } + + if ((int)vals[0] != NVG_CLOSE && (int)vals[0] != NVG_WINDING) { + ctx->commandx = vals[nvals-2]; + ctx->commandy = vals[nvals-1]; + } + + // transform commands + i = 0; + while (i < nvals) { + int cmd = (int)vals[i]; + switch (cmd) { + case NVG_MOVETO: + nvgTransformPoint(&vals[i+1],&vals[i+2], state->xform, vals[i+1],vals[i+2]); + i += 3; + break; + case NVG_LINETO: + nvgTransformPoint(&vals[i+1],&vals[i+2], state->xform, vals[i+1],vals[i+2]); + i += 3; + break; + case NVG_BEZIERTO: + nvgTransformPoint(&vals[i+1],&vals[i+2], state->xform, vals[i+1],vals[i+2]); + nvgTransformPoint(&vals[i+3],&vals[i+4], state->xform, vals[i+3],vals[i+4]); + nvgTransformPoint(&vals[i+5],&vals[i+6], state->xform, vals[i+5],vals[i+6]); + i += 7; + break; + case NVG_CLOSE: + i++; + break; + case NVG_WINDING: + i += 2; + break; + default: + i++; + } + } + + memcpy(&ctx->commands[ctx->ncommands], vals, nvals*sizeof(float)); + + ctx->ncommands += nvals; +} + + +static void nvg__clearPathCache(NVGcontext* ctx) +{ + ctx->cache->npoints = 0; + ctx->cache->npaths = 0; +} + +static NVGpath* nvg__lastPath(NVGcontext* ctx) +{ + if (ctx->cache->npaths > 0) + return &ctx->cache->paths[ctx->cache->npaths-1]; + return NULL; +} + +static void nvg__addPath(NVGcontext* ctx) +{ + NVGpath* path; + if (ctx->cache->npaths+1 > ctx->cache->cpaths) { + NVGpath* paths; + int cpaths = ctx->cache->npaths+1 + ctx->cache->cpaths/2; + paths = (NVGpath*)realloc(ctx->cache->paths, sizeof(NVGpath)*cpaths); + if (paths == NULL) return; + ctx->cache->paths = paths; + ctx->cache->cpaths = cpaths; + } + path = &ctx->cache->paths[ctx->cache->npaths]; + memset(path, 0, sizeof(*path)); + path->first = ctx->cache->npoints; + path->winding = NVG_CCW; + + ctx->cache->npaths++; +} + +static NVGpoint* nvg__lastPoint(NVGcontext* ctx) +{ + if (ctx->cache->npoints > 0) + return &ctx->cache->points[ctx->cache->npoints-1]; + return NULL; +} + +static void 
nvg__addPoint(NVGcontext* ctx, float x, float y, int flags) +{ + NVGpath* path = nvg__lastPath(ctx); + NVGpoint* pt; + if (path == NULL) return; + + if (path->count > 0 && ctx->cache->npoints > 0) { + pt = nvg__lastPoint(ctx); + if (nvg__ptEquals(pt->x,pt->y, x,y, ctx->distTol)) { + pt->flags |= flags; + return; + } + } + + if (ctx->cache->npoints+1 > ctx->cache->cpoints) { + NVGpoint* points; + int cpoints = ctx->cache->npoints+1 + ctx->cache->cpoints/2; + points = (NVGpoint*)realloc(ctx->cache->points, sizeof(NVGpoint)*cpoints); + if (points == NULL) return; + ctx->cache->points = points; + ctx->cache->cpoints = cpoints; + } + + pt = &ctx->cache->points[ctx->cache->npoints]; + memset(pt, 0, sizeof(*pt)); + pt->x = x; + pt->y = y; + pt->flags = (unsigned char)flags; + + ctx->cache->npoints++; + path->count++; +} + +static void nvg__closePath(NVGcontext* ctx) +{ + NVGpath* path = nvg__lastPath(ctx); + if (path == NULL) return; + path->closed = 1; +} + +static void nvg__pathWinding(NVGcontext* ctx, int winding) +{ + NVGpath* path = nvg__lastPath(ctx); + if (path == NULL) return; + path->winding = winding; +} + +static float nvg__getAverageScale(float *t) +{ + float sx = sqrtf(t[0]*t[0] + t[2]*t[2]); + float sy = sqrtf(t[1]*t[1] + t[3]*t[3]); + return (sx + sy) * 0.5f; +} + +static NVGvertex* nvg__allocTempVerts(NVGcontext* ctx, int nverts) +{ + if (nverts > ctx->cache->cverts) { + NVGvertex* verts; + int cverts = (nverts + 0xff) & ~0xff; // Round up to prevent allocations when things change just slightly. + verts = (NVGvertex*)realloc(ctx->cache->verts, sizeof(NVGvertex)*cverts); + if (verts == NULL) return NULL; + ctx->cache->verts = verts; + ctx->cache->cverts = cverts; + } + + return ctx->cache->verts; +} + +static float nvg__triarea2(float ax, float ay, float bx, float by, float cx, float cy) +{ + float abx = bx - ax; + float aby = by - ay; + float acx = cx - ax; + float acy = cy - ay; + return acx*aby - abx*acy; +} + +static float nvg__polyArea(NVGpoint* pts, int npts) +{ + int i; + float area = 0; + for (i = 2; i < npts; i++) { + NVGpoint* a = &pts[0]; + NVGpoint* b = &pts[i-1]; + NVGpoint* c = &pts[i]; + area += nvg__triarea2(a->x,a->y, b->x,b->y, c->x,c->y); + } + return area * 0.5f; +} + +static void nvg__polyReverse(NVGpoint* pts, int npts) +{ + NVGpoint tmp; + int i = 0, j = npts-1; + while (i < j) { + tmp = pts[i]; + pts[i] = pts[j]; + pts[j] = tmp; + i++; + j--; + } +} + + +static void nvg__vset(NVGvertex* vtx, float x, float y, float u, float v) +{ + vtx->x = x; + vtx->y = y; + vtx->u = u; + vtx->v = v; +} + +static void nvg__tesselateBezier(NVGcontext* ctx, + float x1, float y1, float x2, float y2, + float x3, float y3, float x4, float y4, + int level, int type) +{ + float x12,y12,x23,y23,x34,y34,x123,y123,x234,y234,x1234,y1234; + float dx,dy,d2,d3; + + if (level > 10) return; + + x12 = (x1+x2)*0.5f; + y12 = (y1+y2)*0.5f; + x23 = (x2+x3)*0.5f; + y23 = (y2+y3)*0.5f; + x34 = (x3+x4)*0.5f; + y34 = (y3+y4)*0.5f; + x123 = (x12+x23)*0.5f; + y123 = (y12+y23)*0.5f; + + dx = x4 - x1; + dy = y4 - y1; + d2 = nvg__absf(((x2 - x4) * dy - (y2 - y4) * dx)); + d3 = nvg__absf(((x3 - x4) * dy - (y3 - y4) * dx)); + + if ((d2 + d3)*(d2 + d3) < ctx->tessTol * (dx*dx + dy*dy)) { + nvg__addPoint(ctx, x4, y4, type); + return; + } + +/* if (nvg__absf(x1+x3-x2-x2) + nvg__absf(y1+y3-y2-y2) + nvg__absf(x2+x4-x3-x3) + nvg__absf(y2+y4-y3-y3) < ctx->tessTol) { + nvg__addPoint(ctx, x4, y4, type); + return; + }*/ + + x234 = (x23+x34)*0.5f; + y234 = (y23+y34)*0.5f; + x1234 = (x123+x234)*0.5f; + y1234 = 
(y123+y234)*0.5f; + + nvg__tesselateBezier(ctx, x1,y1, x12,y12, x123,y123, x1234,y1234, level+1, 0); + nvg__tesselateBezier(ctx, x1234,y1234, x234,y234, x34,y34, x4,y4, level+1, type); +} + +static void nvg__flattenPaths(NVGcontext* ctx) +{ + NVGpathCache* cache = ctx->cache; +// NVGstate* state = nvg__getState(ctx); + NVGpoint* last; + NVGpoint* p0; + NVGpoint* p1; + NVGpoint* pts; + NVGpath* path; + int i, j; + float* cp1; + float* cp2; + float* p; + float area; + + if (cache->npaths > 0) + return; + + // Flatten + i = 0; + while (i < ctx->ncommands) { + int cmd = (int)ctx->commands[i]; + switch (cmd) { + case NVG_MOVETO: + nvg__addPath(ctx); + p = &ctx->commands[i+1]; + nvg__addPoint(ctx, p[0], p[1], NVG_PT_CORNER); + i += 3; + break; + case NVG_LINETO: + p = &ctx->commands[i+1]; + nvg__addPoint(ctx, p[0], p[1], NVG_PT_CORNER); + i += 3; + break; + case NVG_BEZIERTO: + last = nvg__lastPoint(ctx); + if (last != NULL) { + cp1 = &ctx->commands[i+1]; + cp2 = &ctx->commands[i+3]; + p = &ctx->commands[i+5]; + nvg__tesselateBezier(ctx, last->x,last->y, cp1[0],cp1[1], cp2[0],cp2[1], p[0],p[1], 0, NVG_PT_CORNER); + } + i += 7; + break; + case NVG_CLOSE: + nvg__closePath(ctx); + i++; + break; + case NVG_WINDING: + nvg__pathWinding(ctx, (int)ctx->commands[i+1]); + i += 2; + break; + default: + i++; + } + } + + cache->bounds[0] = cache->bounds[1] = 1e6f; + cache->bounds[2] = cache->bounds[3] = -1e6f; + + // Calculate the direction and length of line segments. + for (j = 0; j < cache->npaths; j++) { + path = &cache->paths[j]; + pts = &cache->points[path->first]; + + // If the first and last points are the same, remove the last, mark as closed path. + p0 = &pts[path->count-1]; + p1 = &pts[0]; + if (nvg__ptEquals(p0->x,p0->y, p1->x,p1->y, ctx->distTol)) { + path->count--; + p0 = &pts[path->count-1]; + path->closed = 1; + } + + // Enforce winding. 
+ if (path->count > 2) { + area = nvg__polyArea(pts, path->count); + if (path->winding == NVG_CCW && area < 0.0f) + nvg__polyReverse(pts, path->count); + if (path->winding == NVG_CW && area > 0.0f) + nvg__polyReverse(pts, path->count); + } + + for(i = 0; i < path->count; i++) { + // Calculate segment direction and length + p0->dx = p1->x - p0->x; + p0->dy = p1->y - p0->y; + p0->len = nvg__normalize(&p0->dx, &p0->dy); + // Update bounds + cache->bounds[0] = nvg__minf(cache->bounds[0], p0->x); + cache->bounds[1] = nvg__minf(cache->bounds[1], p0->y); + cache->bounds[2] = nvg__maxf(cache->bounds[2], p0->x); + cache->bounds[3] = nvg__maxf(cache->bounds[3], p0->y); + // Advance + p0 = p1++; + } + } +} + +static int nvg__curveDivs(float r, float arc, float tol) +{ + float da = acosf(r / (r + tol)) * 2.0f; + return nvg__maxi(2, (int)ceilf(arc / da)); +} + +static void nvg__chooseBevel(int bevel, NVGpoint* p0, NVGpoint* p1, float w, + float* x0, float* y0, float* x1, float* y1) +{ + if (bevel) { + *x0 = p1->x + p0->dy * w; + *y0 = p1->y - p0->dx * w; + *x1 = p1->x + p1->dy * w; + *y1 = p1->y - p1->dx * w; + } else { + *x0 = p1->x + p1->dmx * w; + *y0 = p1->y + p1->dmy * w; + *x1 = p1->x + p1->dmx * w; + *y1 = p1->y + p1->dmy * w; + } +} + +static NVGvertex* nvg__roundJoin(NVGvertex* dst, NVGpoint* p0, NVGpoint* p1, + float lw, float rw, float lu, float ru, int ncap, + float fringe) +{ + int i, n; + float dlx0 = p0->dy; + float dly0 = -p0->dx; + float dlx1 = p1->dy; + float dly1 = -p1->dx; + NVG_NOTUSED(fringe); + + if (p1->flags & NVG_PT_LEFT) { + float lx0,ly0,lx1,ly1,a0,a1; + nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, lw, &lx0,&ly0, &lx1,&ly1); + a0 = atan2f(-dly0, -dlx0); + a1 = atan2f(-dly1, -dlx1); + if (a1 > a0) a1 -= NVG_PI*2; + + nvg__vset(dst, lx0, ly0, lu,1); dst++; + nvg__vset(dst, p1->x - dlx0*rw, p1->y - dly0*rw, ru,1); dst++; + + n = nvg__clampi((int)ceilf(((a0 - a1) / NVG_PI) * ncap), 2, ncap); + for (i = 0; i < n; i++) { + float u = i/(float)(n-1); + float a = a0 + u*(a1-a0); + float rx = p1->x + cosf(a) * rw; + float ry = p1->y + sinf(a) * rw; + nvg__vset(dst, p1->x, p1->y, 0.5f,1); dst++; + nvg__vset(dst, rx, ry, ru,1); dst++; + } + + nvg__vset(dst, lx1, ly1, lu,1); dst++; + nvg__vset(dst, p1->x - dlx1*rw, p1->y - dly1*rw, ru,1); dst++; + + } else { + float rx0,ry0,rx1,ry1,a0,a1; + nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, -rw, &rx0,&ry0, &rx1,&ry1); + a0 = atan2f(dly0, dlx0); + a1 = atan2f(dly1, dlx1); + if (a1 < a0) a1 += NVG_PI*2; + + nvg__vset(dst, p1->x + dlx0*rw, p1->y + dly0*rw, lu,1); dst++; + nvg__vset(dst, rx0, ry0, ru,1); dst++; + + n = nvg__clampi((int)ceilf(((a1 - a0) / NVG_PI) * ncap), 2, ncap); + for (i = 0; i < n; i++) { + float u = i/(float)(n-1); + float a = a0 + u*(a1-a0); + float lx = p1->x + cosf(a) * lw; + float ly = p1->y + sinf(a) * lw; + nvg__vset(dst, lx, ly, lu,1); dst++; + nvg__vset(dst, p1->x, p1->y, 0.5f,1); dst++; + } + + nvg__vset(dst, p1->x + dlx1*rw, p1->y + dly1*rw, lu,1); dst++; + nvg__vset(dst, rx1, ry1, ru,1); dst++; + + } + return dst; +} + +static NVGvertex* nvg__bevelJoin(NVGvertex* dst, NVGpoint* p0, NVGpoint* p1, + float lw, float rw, float lu, float ru, float fringe) +{ + float rx0,ry0,rx1,ry1; + float lx0,ly0,lx1,ly1; + float dlx0 = p0->dy; + float dly0 = -p0->dx; + float dlx1 = p1->dy; + float dly1 = -p1->dx; + NVG_NOTUSED(fringe); + + if (p1->flags & NVG_PT_LEFT) { + nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, lw, &lx0,&ly0, &lx1,&ly1); + + nvg__vset(dst, lx0, ly0, lu,1); dst++; + nvg__vset(dst, 
p1->x - dlx0*rw, p1->y - dly0*rw, ru,1); dst++; + + if (p1->flags & NVG_PT_BEVEL) { + nvg__vset(dst, lx0, ly0, lu,1); dst++; + nvg__vset(dst, p1->x - dlx0*rw, p1->y - dly0*rw, ru,1); dst++; + + nvg__vset(dst, lx1, ly1, lu,1); dst++; + nvg__vset(dst, p1->x - dlx1*rw, p1->y - dly1*rw, ru,1); dst++; + } else { + rx0 = p1->x - p1->dmx * rw; + ry0 = p1->y - p1->dmy * rw; + + nvg__vset(dst, p1->x, p1->y, 0.5f,1); dst++; + nvg__vset(dst, p1->x - dlx0*rw, p1->y - dly0*rw, ru,1); dst++; + + nvg__vset(dst, rx0, ry0, ru,1); dst++; + nvg__vset(dst, rx0, ry0, ru,1); dst++; + + nvg__vset(dst, p1->x, p1->y, 0.5f,1); dst++; + nvg__vset(dst, p1->x - dlx1*rw, p1->y - dly1*rw, ru,1); dst++; + } + + nvg__vset(dst, lx1, ly1, lu,1); dst++; + nvg__vset(dst, p1->x - dlx1*rw, p1->y - dly1*rw, ru,1); dst++; + + } else { + nvg__chooseBevel(p1->flags & NVG_PR_INNERBEVEL, p0, p1, -rw, &rx0,&ry0, &rx1,&ry1); + + nvg__vset(dst, p1->x + dlx0*lw, p1->y + dly0*lw, lu,1); dst++; + nvg__vset(dst, rx0, ry0, ru,1); dst++; + + if (p1->flags & NVG_PT_BEVEL) { + nvg__vset(dst, p1->x + dlx0*lw, p1->y + dly0*lw, lu,1); dst++; + nvg__vset(dst, rx0, ry0, ru,1); dst++; + + nvg__vset(dst, p1->x + dlx1*lw, p1->y + dly1*lw, lu,1); dst++; + nvg__vset(dst, rx1, ry1, ru,1); dst++; + } else { + lx0 = p1->x + p1->dmx * lw; + ly0 = p1->y + p1->dmy * lw; + + nvg__vset(dst, p1->x + dlx0*lw, p1->y + dly0*lw, lu,1); dst++; + nvg__vset(dst, p1->x, p1->y, 0.5f,1); dst++; + + nvg__vset(dst, lx0, ly0, lu,1); dst++; + nvg__vset(dst, lx0, ly0, lu,1); dst++; + + nvg__vset(dst, p1->x + dlx1*lw, p1->y + dly1*lw, lu,1); dst++; + nvg__vset(dst, p1->x, p1->y, 0.5f,1); dst++; + } + + nvg__vset(dst, p1->x + dlx1*lw, p1->y + dly1*lw, lu,1); dst++; + nvg__vset(dst, rx1, ry1, ru,1); dst++; + } + + return dst; +} + +static NVGvertex* nvg__buttCapStart(NVGvertex* dst, NVGpoint* p, + float dx, float dy, float w, float d, + float aa, float u0, float u1) +{ + float px = p->x - dx*d; + float py = p->y - dy*d; + float dlx = dy; + float dly = -dx; + nvg__vset(dst, px + dlx*w - dx*aa, py + dly*w - dy*aa, u0,0); dst++; + nvg__vset(dst, px - dlx*w - dx*aa, py - dly*w - dy*aa, u1,0); dst++; + nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++; + nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++; + return dst; +} + +static NVGvertex* nvg__buttCapEnd(NVGvertex* dst, NVGpoint* p, + float dx, float dy, float w, float d, + float aa, float u0, float u1) +{ + float px = p->x + dx*d; + float py = p->y + dy*d; + float dlx = dy; + float dly = -dx; + nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++; + nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++; + nvg__vset(dst, px + dlx*w + dx*aa, py + dly*w + dy*aa, u0,0); dst++; + nvg__vset(dst, px - dlx*w + dx*aa, py - dly*w + dy*aa, u1,0); dst++; + return dst; +} + + +static NVGvertex* nvg__roundCapStart(NVGvertex* dst, NVGpoint* p, + float dx, float dy, float w, int ncap, + float aa, float u0, float u1) +{ + int i; + float px = p->x; + float py = p->y; + float dlx = dy; + float dly = -dx; + NVG_NOTUSED(aa); + for (i = 0; i < ncap; i++) { + float a = i/(float)(ncap-1)*NVG_PI; + float ax = cosf(a) * w, ay = sinf(a) * w; + nvg__vset(dst, px - dlx*ax - dx*ay, py - dly*ax - dy*ay, u0,1); dst++; + nvg__vset(dst, px, py, 0.5f,1); dst++; + } + nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++; + nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++; + return dst; +} + +static NVGvertex* nvg__roundCapEnd(NVGvertex* dst, NVGpoint* p, + float dx, float dy, float w, int ncap, + float aa, float u0, float u1) +{ + int i; + float px = p->x; + float 
py = p->y; + float dlx = dy; + float dly = -dx; + NVG_NOTUSED(aa); + nvg__vset(dst, px + dlx*w, py + dly*w, u0,1); dst++; + nvg__vset(dst, px - dlx*w, py - dly*w, u1,1); dst++; + for (i = 0; i < ncap; i++) { + float a = i/(float)(ncap-1)*NVG_PI; + float ax = cosf(a) * w, ay = sinf(a) * w; + nvg__vset(dst, px, py, 0.5f,1); dst++; + nvg__vset(dst, px - dlx*ax + dx*ay, py - dly*ax + dy*ay, u0,1); dst++; + } + return dst; +} + + +static void nvg__calculateJoins(NVGcontext* ctx, float w, int lineJoin, float miterLimit) +{ + NVGpathCache* cache = ctx->cache; + int i, j; + float iw = 0.0f; + + if (w > 0.0f) iw = 1.0f / w; + + // Calculate which joins needs extra vertices to append, and gather vertex count. + for (i = 0; i < cache->npaths; i++) { + NVGpath* path = &cache->paths[i]; + NVGpoint* pts = &cache->points[path->first]; + NVGpoint* p0 = &pts[path->count-1]; + NVGpoint* p1 = &pts[0]; + int nleft = 0; + + path->nbevel = 0; + + for (j = 0; j < path->count; j++) { + float dlx0, dly0, dlx1, dly1, dmr2, cross, limit; + dlx0 = p0->dy; + dly0 = -p0->dx; + dlx1 = p1->dy; + dly1 = -p1->dx; + // Calculate extrusions + p1->dmx = (dlx0 + dlx1) * 0.5f; + p1->dmy = (dly0 + dly1) * 0.5f; + dmr2 = p1->dmx*p1->dmx + p1->dmy*p1->dmy; + if (dmr2 > 0.000001f) { + float scale = 1.0f / dmr2; + if (scale > 600.0f) { + scale = 600.0f; + } + p1->dmx *= scale; + p1->dmy *= scale; + } + + // Clear flags, but keep the corner. + p1->flags = (p1->flags & NVG_PT_CORNER) ? NVG_PT_CORNER : 0; + + // Keep track of left turns. + cross = p1->dx * p0->dy - p0->dx * p1->dy; + if (cross > 0.0f) { + nleft++; + p1->flags |= NVG_PT_LEFT; + } + + // Calculate if we should use bevel or miter for inner join. + limit = nvg__maxf(1.01f, nvg__minf(p0->len, p1->len) * iw); + if ((dmr2 * limit*limit) < 1.0f) + p1->flags |= NVG_PR_INNERBEVEL; + + // Check to see if the corner needs to be beveled. + if (p1->flags & NVG_PT_CORNER) { + if ((dmr2 * miterLimit*miterLimit) < 1.0f || lineJoin == NVG_BEVEL || lineJoin == NVG_ROUND) { + p1->flags |= NVG_PT_BEVEL; + } + } + + if ((p1->flags & (NVG_PT_BEVEL | NVG_PR_INNERBEVEL)) != 0) + path->nbevel++; + + p0 = p1++; + } + + path->convex = (nleft == path->count) ? 1 : 0; + } +} + + +static int nvg__expandStroke(NVGcontext* ctx, float w, float fringe, int lineCap, int lineJoin, float miterLimit) +{ + NVGpathCache* cache = ctx->cache; + NVGvertex* verts; + NVGvertex* dst; + int cverts, i, j; + float aa = fringe;//ctx->fringeWidth; + float u0 = 0.0f, u1 = 1.0f; + int ncap = nvg__curveDivs(w, NVG_PI, ctx->tessTol); // Calculate divisions per half circle. + + w += aa * 0.5f; + + // Disable the gradient used for antialiasing when antialiasing is not used. + if (aa == 0.0f) { + u0 = 0.5f; + u1 = 0.5f; + } + + nvg__calculateJoins(ctx, w, lineJoin, miterLimit); + + // Calculate max vertex usage. + cverts = 0; + for (i = 0; i < cache->npaths; i++) { + NVGpath* path = &cache->paths[i]; + int loop = (path->closed == 0) ? 
0 : 1; + if (lineJoin == NVG_ROUND) + cverts += (path->count + path->nbevel*(ncap+2) + 1) * 2; // plus one for loop + else + cverts += (path->count + path->nbevel*5 + 1) * 2; // plus one for loop + if (loop == 0) { + // space for caps + if (lineCap == NVG_ROUND) { + cverts += (ncap*2 + 2)*2; + } else { + cverts += (3+3)*2; + } + } + } + + verts = nvg__allocTempVerts(ctx, cverts); + if (verts == NULL) return 0; + + for (i = 0; i < cache->npaths; i++) { + NVGpath* path = &cache->paths[i]; + NVGpoint* pts = &cache->points[path->first]; + NVGpoint* p0; + NVGpoint* p1; + int s, e, loop; + float dx, dy; + + path->fill = 0; + path->nfill = 0; + + // Calculate fringe or stroke + loop = (path->closed == 0) ? 0 : 1; + dst = verts; + path->stroke = dst; + + if (loop) { + // Looping + p0 = &pts[path->count-1]; + p1 = &pts[0]; + s = 0; + e = path->count; + } else { + // Add cap + p0 = &pts[0]; + p1 = &pts[1]; + s = 1; + e = path->count-1; + } + + if (loop == 0) { + // Add cap + dx = p1->x - p0->x; + dy = p1->y - p0->y; + nvg__normalize(&dx, &dy); + if (lineCap == NVG_BUTT) + dst = nvg__buttCapStart(dst, p0, dx, dy, w, -aa*0.5f, aa, u0, u1); + else if (lineCap == NVG_BUTT || lineCap == NVG_SQUARE) + dst = nvg__buttCapStart(dst, p0, dx, dy, w, w-aa, aa, u0, u1); + else if (lineCap == NVG_ROUND) + dst = nvg__roundCapStart(dst, p0, dx, dy, w, ncap, aa, u0, u1); + } + + for (j = s; j < e; ++j) { + if ((p1->flags & (NVG_PT_BEVEL | NVG_PR_INNERBEVEL)) != 0) { + if (lineJoin == NVG_ROUND) { + dst = nvg__roundJoin(dst, p0, p1, w, w, u0, u1, ncap, aa); + } else { + dst = nvg__bevelJoin(dst, p0, p1, w, w, u0, u1, aa); + } + } else { + nvg__vset(dst, p1->x + (p1->dmx * w), p1->y + (p1->dmy * w), u0,1); dst++; + nvg__vset(dst, p1->x - (p1->dmx * w), p1->y - (p1->dmy * w), u1,1); dst++; + } + p0 = p1++; + } + + if (loop) { + // Loop it + nvg__vset(dst, verts[0].x, verts[0].y, u0,1); dst++; + nvg__vset(dst, verts[1].x, verts[1].y, u1,1); dst++; + } else { + // Add cap + dx = p1->x - p0->x; + dy = p1->y - p0->y; + nvg__normalize(&dx, &dy); + if (lineCap == NVG_BUTT) + dst = nvg__buttCapEnd(dst, p1, dx, dy, w, -aa*0.5f, aa, u0, u1); + else if (lineCap == NVG_BUTT || lineCap == NVG_SQUARE) + dst = nvg__buttCapEnd(dst, p1, dx, dy, w, w-aa, aa, u0, u1); + else if (lineCap == NVG_ROUND) + dst = nvg__roundCapEnd(dst, p1, dx, dy, w, ncap, aa, u0, u1); + } + + path->nstroke = (int)(dst - verts); + + verts = dst; + } + + return 1; +} + +static int nvg__expandFill(NVGcontext* ctx, float w, int lineJoin, float miterLimit) +{ + NVGpathCache* cache = ctx->cache; + NVGvertex* verts; + NVGvertex* dst; + int cverts, convex, i, j; + float aa = ctx->fringeWidth; + int fringe = w > 0.0f; + + nvg__calculateJoins(ctx, w, lineJoin, miterLimit); + + // Calculate max vertex usage. + cverts = 0; + for (i = 0; i < cache->npaths; i++) { + NVGpath* path = &cache->paths[i]; + cverts += path->count + path->nbevel + 1; + if (fringe) + cverts += (path->count + path->nbevel*5 + 1) * 2; // plus one for loop + } + + verts = nvg__allocTempVerts(ctx, cverts); + if (verts == NULL) return 0; + + convex = cache->npaths == 1 && cache->paths[0].convex; + + for (i = 0; i < cache->npaths; i++) { + NVGpath* path = &cache->paths[i]; + NVGpoint* pts = &cache->points[path->first]; + NVGpoint* p0; + NVGpoint* p1; + float rw, lw, woff; + float ru, lu; + + // Calculate shape vertices. 
+ woff = 0.5f*aa; + dst = verts; + path->fill = dst; + + if (fringe) { + // Looping + p0 = &pts[path->count-1]; + p1 = &pts[0]; + for (j = 0; j < path->count; ++j) { + if (p1->flags & NVG_PT_BEVEL) { + float dlx0 = p0->dy; + float dly0 = -p0->dx; + float dlx1 = p1->dy; + float dly1 = -p1->dx; + if (p1->flags & NVG_PT_LEFT) { + float lx = p1->x + p1->dmx * woff; + float ly = p1->y + p1->dmy * woff; + nvg__vset(dst, lx, ly, 0.5f,1); dst++; + } else { + float lx0 = p1->x + dlx0 * woff; + float ly0 = p1->y + dly0 * woff; + float lx1 = p1->x + dlx1 * woff; + float ly1 = p1->y + dly1 * woff; + nvg__vset(dst, lx0, ly0, 0.5f,1); dst++; + nvg__vset(dst, lx1, ly1, 0.5f,1); dst++; + } + } else { + nvg__vset(dst, p1->x + (p1->dmx * woff), p1->y + (p1->dmy * woff), 0.5f,1); dst++; + } + p0 = p1++; + } + } else { + for (j = 0; j < path->count; ++j) { + nvg__vset(dst, pts[j].x, pts[j].y, 0.5f,1); + dst++; + } + } + + path->nfill = (int)(dst - verts); + verts = dst; + + // Calculate fringe + if (fringe) { + lw = w + woff; + rw = w - woff; + lu = 0; + ru = 1; + dst = verts; + path->stroke = dst; + + // Create only half a fringe for convex shapes so that + // the shape can be rendered without stenciling. + if (convex) { + lw = woff; // This should generate the same vertex as fill inset above. + lu = 0.5f; // Set outline fade at middle. + } + + // Looping + p0 = &pts[path->count-1]; + p1 = &pts[0]; + + for (j = 0; j < path->count; ++j) { + if ((p1->flags & (NVG_PT_BEVEL | NVG_PR_INNERBEVEL)) != 0) { + dst = nvg__bevelJoin(dst, p0, p1, lw, rw, lu, ru, ctx->fringeWidth); + } else { + nvg__vset(dst, p1->x + (p1->dmx * lw), p1->y + (p1->dmy * lw), lu,1); dst++; + nvg__vset(dst, p1->x - (p1->dmx * rw), p1->y - (p1->dmy * rw), ru,1); dst++; + } + p0 = p1++; + } + + // Loop it + nvg__vset(dst, verts[0].x, verts[0].y, lu,1); dst++; + nvg__vset(dst, verts[1].x, verts[1].y, ru,1); dst++; + + path->nstroke = (int)(dst - verts); + verts = dst; + } else { + path->stroke = NULL; + path->nstroke = 0; + } + } + + return 1; +} + + +// Draw +void nvgBeginPath(NVGcontext* ctx) +{ + ctx->ncommands = 0; + nvg__clearPathCache(ctx); +} + +void nvgMoveTo(NVGcontext* ctx, float x, float y) +{ + float vals[] = { NVG_MOVETO, x, y }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgLineTo(NVGcontext* ctx, float x, float y) +{ + float vals[] = { NVG_LINETO, x, y }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgBezierTo(NVGcontext* ctx, float c1x, float c1y, float c2x, float c2y, float x, float y) +{ + float vals[] = { NVG_BEZIERTO, c1x, c1y, c2x, c2y, x, y }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgQuadTo(NVGcontext* ctx, float cx, float cy, float x, float y) +{ + float x0 = ctx->commandx; + float y0 = ctx->commandy; + float vals[] = { NVG_BEZIERTO, + x0 + 2.0f/3.0f*(cx - x0), y0 + 2.0f/3.0f*(cy - y0), + x + 2.0f/3.0f*(cx - x), y + 2.0f/3.0f*(cy - y), + x, y }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgArcTo(NVGcontext* ctx, float x1, float y1, float x2, float y2, float radius) +{ + float x0 = ctx->commandx; + float y0 = ctx->commandy; + float dx0,dy0, dx1,dy1, a, d, cx,cy, a0,a1; + int dir; + + if (ctx->ncommands == 0) { + return; + } + + // Handle degenerate cases. 
+ if (nvg__ptEquals(x0,y0, x1,y1, ctx->distTol) || + nvg__ptEquals(x1,y1, x2,y2, ctx->distTol) || + nvg__distPtSeg(x1,y1, x0,y0, x2,y2) < ctx->distTol*ctx->distTol || + radius < ctx->distTol) { + nvgLineTo(ctx, x1,y1); + return; + } + + // Calculate tangential circle to lines (x0,y0)-(x1,y1) and (x1,y1)-(x2,y2). + dx0 = x0-x1; + dy0 = y0-y1; + dx1 = x2-x1; + dy1 = y2-y1; + nvg__normalize(&dx0,&dy0); + nvg__normalize(&dx1,&dy1); + a = nvg__acosf(dx0*dx1 + dy0*dy1); + d = radius / nvg__tanf(a/2.0f); + +// printf("a=%f° d=%f\n", a/NVG_PI*180.0f, d); + + if (d > 10000.0f) { + nvgLineTo(ctx, x1,y1); + return; + } + + if (nvg__cross(dx0,dy0, dx1,dy1) > 0.0f) { + cx = x1 + dx0*d + dy0*radius; + cy = y1 + dy0*d + -dx0*radius; + a0 = nvg__atan2f(dx0, -dy0); + a1 = nvg__atan2f(-dx1, dy1); + dir = NVG_CW; +// printf("CW c=(%f, %f) a0=%f° a1=%f°\n", cx, cy, a0/NVG_PI*180.0f, a1/NVG_PI*180.0f); + } else { + cx = x1 + dx0*d + -dy0*radius; + cy = y1 + dy0*d + dx0*radius; + a0 = nvg__atan2f(-dx0, dy0); + a1 = nvg__atan2f(dx1, -dy1); + dir = NVG_CCW; +// printf("CCW c=(%f, %f) a0=%f° a1=%f°\n", cx, cy, a0/NVG_PI*180.0f, a1/NVG_PI*180.0f); + } + + nvgArc(ctx, cx, cy, radius, a0, a1, dir); +} + +void nvgClosePath(NVGcontext* ctx) +{ + float vals[] = { NVG_CLOSE }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgPathWinding(NVGcontext* ctx, int dir) +{ + float vals[] = { NVG_WINDING, (float)dir }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgArc(NVGcontext* ctx, float cx, float cy, float r, float a0, float a1, int dir) +{ + float a = 0, da = 0, hda = 0, kappa = 0; + float dx = 0, dy = 0, x = 0, y = 0, tanx = 0, tany = 0; + float px = 0, py = 0, ptanx = 0, ptany = 0; + float vals[3 + 5*7 + 100]; + int i, ndivs, nvals; + int move = ctx->ncommands > 0 ? NVG_LINETO : NVG_MOVETO; + + // Clamp angles + da = a1 - a0; + if (dir == NVG_CW) { + if (nvg__absf(da) >= NVG_PI*2) { + da = NVG_PI*2; + } else { + while (da < 0.0f) da += NVG_PI*2; + } + } else { + if (nvg__absf(da) >= NVG_PI*2) { + da = -NVG_PI*2; + } else { + while (da > 0.0f) da -= NVG_PI*2; + } + } + + // Split arc into max 90 degree segments. 
+ ndivs = nvg__maxi(1, nvg__mini((int)(nvg__absf(da) / (NVG_PI*0.5f) + 0.5f), 5)); + hda = (da / (float)ndivs) / 2.0f; + kappa = nvg__absf(4.0f / 3.0f * (1.0f - nvg__cosf(hda)) / nvg__sinf(hda)); + + if (dir == NVG_CCW) + kappa = -kappa; + + nvals = 0; + for (i = 0; i <= ndivs; i++) { + a = a0 + da * (i/(float)ndivs); + dx = nvg__cosf(a); + dy = nvg__sinf(a); + x = cx + dx*r; + y = cy + dy*r; + tanx = -dy*r*kappa; + tany = dx*r*kappa; + + if (i == 0) { + vals[nvals++] = (float)move; + vals[nvals++] = x; + vals[nvals++] = y; + } else { + vals[nvals++] = NVG_BEZIERTO; + vals[nvals++] = px+ptanx; + vals[nvals++] = py+ptany; + vals[nvals++] = x-tanx; + vals[nvals++] = y-tany; + vals[nvals++] = x; + vals[nvals++] = y; + } + px = x; + py = y; + ptanx = tanx; + ptany = tany; + } + + nvg__appendCommands(ctx, vals, nvals); +} + +void nvgRect(NVGcontext* ctx, float x, float y, float w, float h) +{ + float vals[] = { + NVG_MOVETO, x,y, + NVG_LINETO, x,y+h, + NVG_LINETO, x+w,y+h, + NVG_LINETO, x+w,y, + NVG_CLOSE + }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgRoundedRect(NVGcontext* ctx, float x, float y, float w, float h, float r) +{ + nvgRoundedRectVarying(ctx, x, y, w, h, r, r, r, r); +} + +void nvgRoundedRectVarying(NVGcontext* ctx, float x, float y, float w, float h, float radTopLeft, float radTopRight, float radBottomRight, float radBottomLeft) +{ + if(radTopLeft < 0.1f && radTopRight < 0.1f && radBottomRight < 0.1f && radBottomLeft < 0.1f) { + nvgRect(ctx, x, y, w, h); + return; + } else { + float halfw = nvg__absf(w)*0.5f; + float halfh = nvg__absf(h)*0.5f; + float rxBL = nvg__minf(radBottomLeft, halfw) * nvg__signf(w), ryBL = nvg__minf(radBottomLeft, halfh) * nvg__signf(h); + float rxBR = nvg__minf(radBottomRight, halfw) * nvg__signf(w), ryBR = nvg__minf(radBottomRight, halfh) * nvg__signf(h); + float rxTR = nvg__minf(radTopRight, halfw) * nvg__signf(w), ryTR = nvg__minf(radTopRight, halfh) * nvg__signf(h); + float rxTL = nvg__minf(radTopLeft, halfw) * nvg__signf(w), ryTL = nvg__minf(radTopLeft, halfh) * nvg__signf(h); + float vals[] = { + NVG_MOVETO, x, y + ryTL, + NVG_LINETO, x, y + h - ryBL, + NVG_BEZIERTO, x, y + h - ryBL*(1 - NVG_KAPPA90), x + rxBL*(1 - NVG_KAPPA90), y + h, x + rxBL, y + h, + NVG_LINETO, x + w - rxBR, y + h, + NVG_BEZIERTO, x + w - rxBR*(1 - NVG_KAPPA90), y + h, x + w, y + h - ryBR*(1 - NVG_KAPPA90), x + w, y + h - ryBR, + NVG_LINETO, x + w, y + ryTR, + NVG_BEZIERTO, x + w, y + ryTR*(1 - NVG_KAPPA90), x + w - rxTR*(1 - NVG_KAPPA90), y, x + w - rxTR, y, + NVG_LINETO, x + rxTL, y, + NVG_BEZIERTO, x + rxTL*(1 - NVG_KAPPA90), y, x, y + ryTL*(1 - NVG_KAPPA90), x, y + ryTL, + NVG_CLOSE + }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); + } +} + +void nvgEllipse(NVGcontext* ctx, float cx, float cy, float rx, float ry) +{ + float vals[] = { + NVG_MOVETO, cx-rx, cy, + NVG_BEZIERTO, cx-rx, cy+ry*NVG_KAPPA90, cx-rx*NVG_KAPPA90, cy+ry, cx, cy+ry, + NVG_BEZIERTO, cx+rx*NVG_KAPPA90, cy+ry, cx+rx, cy+ry*NVG_KAPPA90, cx+rx, cy, + NVG_BEZIERTO, cx+rx, cy-ry*NVG_KAPPA90, cx+rx*NVG_KAPPA90, cy-ry, cx, cy-ry, + NVG_BEZIERTO, cx-rx*NVG_KAPPA90, cy-ry, cx-rx, cy-ry*NVG_KAPPA90, cx-rx, cy, + NVG_CLOSE + }; + nvg__appendCommands(ctx, vals, NVG_COUNTOF(vals)); +} + +void nvgCircle(NVGcontext* ctx, float cx, float cy, float r) +{ + nvgEllipse(ctx, cx,cy, r,r); +} + +void nvgDebugDumpPathCache(NVGcontext* ctx) +{ + const NVGpath* path; + int i, j; + + printf("Dumping %d cached paths\n", ctx->cache->npaths); + for (i = 0; i < ctx->cache->npaths; i++) { + path = 
&ctx->cache->paths[i]; + printf(" - Path %d\n", i); + if (path->nfill) { + printf(" - fill: %d\n", path->nfill); + for (j = 0; j < path->nfill; j++) + printf("%f\t%f\n", path->fill[j].x, path->fill[j].y); + } + if (path->nstroke) { + printf(" - stroke: %d\n", path->nstroke); + for (j = 0; j < path->nstroke; j++) + printf("%f\t%f\n", path->stroke[j].x, path->stroke[j].y); + } + } +} + +void nvgFill(NVGcontext* ctx) +{ + NVGstate* state = nvg__getState(ctx); + const NVGpath* path; + NVGpaint fillPaint = state->fill; + int i; + + nvg__flattenPaths(ctx); + if (ctx->params.edgeAntiAlias && state->shapeAntiAlias) + nvg__expandFill(ctx, ctx->fringeWidth, NVG_MITER, 2.4f); + else + nvg__expandFill(ctx, 0.0f, NVG_MITER, 2.4f); + + // Apply global alpha + fillPaint.innerColor.a *= state->alpha; + fillPaint.outerColor.a *= state->alpha; + + ctx->params.renderFill(ctx->params.userPtr, &fillPaint, state->compositeOperation, &state->scissor, ctx->fringeWidth, + ctx->cache->bounds, ctx->cache->paths, ctx->cache->npaths); + + // Count triangles + for (i = 0; i < ctx->cache->npaths; i++) { + path = &ctx->cache->paths[i]; + ctx->fillTriCount += path->nfill-2; + ctx->fillTriCount += path->nstroke-2; + ctx->drawCallCount += 2; + } +} + +void nvgStroke(NVGcontext* ctx) +{ + NVGstate* state = nvg__getState(ctx); + float scale = nvg__getAverageScale(state->xform); + float strokeWidth = nvg__clampf(state->strokeWidth * scale, 0.0f, 200.0f); + NVGpaint strokePaint = state->stroke; + const NVGpath* path; + int i; + + + if (strokeWidth < ctx->fringeWidth) { + // If the stroke width is less than pixel size, use alpha to emulate coverage. + // Since coverage is area, scale by alpha*alpha. + float alpha = nvg__clampf(strokeWidth / ctx->fringeWidth, 0.0f, 1.0f); + strokePaint.innerColor.a *= alpha*alpha; + strokePaint.outerColor.a *= alpha*alpha; + strokeWidth = ctx->fringeWidth; + } + + // Apply global alpha + strokePaint.innerColor.a *= state->alpha; + strokePaint.outerColor.a *= state->alpha; + + nvg__flattenPaths(ctx); + + if (ctx->params.edgeAntiAlias && state->shapeAntiAlias) + nvg__expandStroke(ctx, strokeWidth*0.5f, ctx->fringeWidth, state->lineCap, state->lineJoin, state->miterLimit); + else + nvg__expandStroke(ctx, strokeWidth*0.5f, 0.0f, state->lineCap, state->lineJoin, state->miterLimit); + + ctx->params.renderStroke(ctx->params.userPtr, &strokePaint, state->compositeOperation, &state->scissor, ctx->fringeWidth, + strokeWidth, ctx->cache->paths, ctx->cache->npaths); + + // Count triangles + for (i = 0; i < ctx->cache->npaths; i++) { + path = &ctx->cache->paths[i]; + ctx->strokeTriCount += path->nstroke-2; + ctx->drawCallCount++; + } +} + +// Add fonts +int nvgCreateFont(NVGcontext* ctx, const char* name, const char* filename) +{ + return fonsAddFont(ctx->fs, name, filename, 0); +} + +int nvgCreateFontAtIndex(NVGcontext* ctx, const char* name, const char* filename, const int fontIndex) +{ + return fonsAddFont(ctx->fs, name, filename, fontIndex); +} + +int nvgCreateFontMem(NVGcontext* ctx, const char* name, unsigned char* data, int ndata, int freeData) +{ + return fonsAddFontMem(ctx->fs, name, data, ndata, freeData, 0); +} + +int nvgCreateFontMemAtIndex(NVGcontext* ctx, const char* name, unsigned char* data, int ndata, int freeData, const int fontIndex) +{ + return fonsAddFontMem(ctx->fs, name, data, ndata, freeData, fontIndex); +} + +int nvgFindFont(NVGcontext* ctx, const char* name) +{ + if (name == NULL) return -1; + return fonsGetFontByName(ctx->fs, name); +} + + +int nvgAddFallbackFontId(NVGcontext* ctx, 
int baseFont, int fallbackFont) +{ + if(baseFont == -1 || fallbackFont == -1) return 0; + return fonsAddFallbackFont(ctx->fs, baseFont, fallbackFont); +} + +int nvgAddFallbackFont(NVGcontext* ctx, const char* baseFont, const char* fallbackFont) +{ + return nvgAddFallbackFontId(ctx, nvgFindFont(ctx, baseFont), nvgFindFont(ctx, fallbackFont)); +} + +void nvgResetFallbackFontsId(NVGcontext* ctx, int baseFont) +{ + fonsResetFallbackFont(ctx->fs, baseFont); +} + +void nvgResetFallbackFonts(NVGcontext* ctx, const char* baseFont) +{ + nvgResetFallbackFontsId(ctx, nvgFindFont(ctx, baseFont)); +} + +// State setting +void nvgFontSize(NVGcontext* ctx, float size) +{ + NVGstate* state = nvg__getState(ctx); + state->fontSize = size; +} + +void nvgFontBlur(NVGcontext* ctx, float blur) +{ + NVGstate* state = nvg__getState(ctx); + state->fontBlur = blur; +} + +void nvgTextLetterSpacing(NVGcontext* ctx, float spacing) +{ + NVGstate* state = nvg__getState(ctx); + state->letterSpacing = spacing; +} + +void nvgTextLineHeight(NVGcontext* ctx, float lineHeight) +{ + NVGstate* state = nvg__getState(ctx); + state->lineHeight = lineHeight; +} + +void nvgTextAlign(NVGcontext* ctx, int align) +{ + NVGstate* state = nvg__getState(ctx); + state->textAlign = align; +} + +void nvgFontFaceId(NVGcontext* ctx, int font) +{ + NVGstate* state = nvg__getState(ctx); + state->fontId = font; +} + +void nvgFontFace(NVGcontext* ctx, const char* font) +{ + NVGstate* state = nvg__getState(ctx); + state->fontId = fonsGetFontByName(ctx->fs, font); +} + +static float nvg__quantize(float a, float d) +{ + return ((int)(a / d + 0.5f)) * d; +} + +static float nvg__getFontScale(NVGstate* state) +{ + return nvg__minf(nvg__quantize(nvg__getAverageScale(state->xform), 0.01f), 4.0f); +} + +static void nvg__flushTextTexture(NVGcontext* ctx) +{ + int dirty[4]; + + if (fonsValidateTexture(ctx->fs, dirty)) { + int fontImage = ctx->fontImages[ctx->fontImageIdx]; + // Update texture + if (fontImage != 0) { + int iw, ih; + const unsigned char* data = fonsGetTextureData(ctx->fs, &iw, &ih); + int x = dirty[0]; + int y = dirty[1]; + int w = dirty[2] - dirty[0]; + int h = dirty[3] - dirty[1]; + ctx->params.renderUpdateTexture(ctx->params.userPtr, fontImage, x,y, w,h, data); + } + } +} + +static int nvg__allocTextAtlas(NVGcontext* ctx) +{ + int iw, ih; + nvg__flushTextTexture(ctx); + if (ctx->fontImageIdx >= NVG_MAX_FONTIMAGES-1) + return 0; + // if next fontImage already have a texture + if (ctx->fontImages[ctx->fontImageIdx+1] != 0) + nvgImageSize(ctx, ctx->fontImages[ctx->fontImageIdx+1], &iw, &ih); + else { // calculate the new font image size and create it. + nvgImageSize(ctx, ctx->fontImages[ctx->fontImageIdx], &iw, &ih); + if (iw > ih) + ih *= 2; + else + iw *= 2; + if (iw > NVG_MAX_FONTIMAGE_SIZE || ih > NVG_MAX_FONTIMAGE_SIZE) + iw = ih = NVG_MAX_FONTIMAGE_SIZE; + ctx->fontImages[ctx->fontImageIdx+1] = ctx->params.renderCreateTexture(ctx->params.userPtr, NVG_TEXTURE_ALPHA, iw, ih, 0, NULL); + } + ++ctx->fontImageIdx; + fonsResetAtlas(ctx->fs, iw, ih); + return 1; +} + +static void nvg__renderText(NVGcontext* ctx, NVGvertex* verts, int nverts) +{ + NVGstate* state = nvg__getState(ctx); + NVGpaint paint = state->fill; + + // Render triangles. 
+ paint.image = ctx->fontImages[ctx->fontImageIdx]; + + // Apply global alpha + paint.innerColor.a *= state->alpha; + paint.outerColor.a *= state->alpha; + + ctx->params.renderTriangles(ctx->params.userPtr, &paint, state->compositeOperation, &state->scissor, verts, nverts, ctx->fringeWidth); + + ctx->drawCallCount++; + ctx->textTriCount += nverts/3; +} + +float nvgText(NVGcontext* ctx, float x, float y, const char* string, const char* end) +{ + NVGstate* state = nvg__getState(ctx); + FONStextIter iter, prevIter; + FONSquad q; + NVGvertex* verts; + float scale = nvg__getFontScale(state) * ctx->devicePxRatio; + float invscale = 1.0f / scale; + int cverts = 0; + int nverts = 0; + + if (end == NULL) + end = string + strlen(string); + + if (state->fontId == FONS_INVALID) return x; + + fonsSetSize(ctx->fs, state->fontSize*scale); + fonsSetSpacing(ctx->fs, state->letterSpacing*scale); + fonsSetBlur(ctx->fs, state->fontBlur*scale); + fonsSetAlign(ctx->fs, state->textAlign); + fonsSetFont(ctx->fs, state->fontId); + + cverts = nvg__maxi(2, (int)(end - string)) * 6; // conservative estimate. + verts = nvg__allocTempVerts(ctx, cverts); + if (verts == NULL) return x; + + fonsTextIterInit(ctx->fs, &iter, x*scale, y*scale, string, end, FONS_GLYPH_BITMAP_REQUIRED); + prevIter = iter; + while (fonsTextIterNext(ctx->fs, &iter, &q)) { + float c[4*2]; + if (iter.prevGlyphIndex == -1) { // can not retrieve glyph? + if (nverts != 0) { + nvg__renderText(ctx, verts, nverts); + nverts = 0; + } + if (!nvg__allocTextAtlas(ctx)) + break; // no memory :( + iter = prevIter; + fonsTextIterNext(ctx->fs, &iter, &q); // try again + if (iter.prevGlyphIndex == -1) // still can not find glyph? + break; + } + prevIter = iter; + // Transform corners. + nvgTransformPoint(&c[0],&c[1], state->xform, q.x0*invscale, q.y0*invscale); + nvgTransformPoint(&c[2],&c[3], state->xform, q.x1*invscale, q.y0*invscale); + nvgTransformPoint(&c[4],&c[5], state->xform, q.x1*invscale, q.y1*invscale); + nvgTransformPoint(&c[6],&c[7], state->xform, q.x0*invscale, q.y1*invscale); + // Create triangles + if (nverts+6 <= cverts) { + nvg__vset(&verts[nverts], c[0], c[1], q.s0, q.t0); nverts++; + nvg__vset(&verts[nverts], c[4], c[5], q.s1, q.t1); nverts++; + nvg__vset(&verts[nverts], c[2], c[3], q.s1, q.t0); nverts++; + nvg__vset(&verts[nverts], c[0], c[1], q.s0, q.t0); nverts++; + nvg__vset(&verts[nverts], c[6], c[7], q.s0, q.t1); nverts++; + nvg__vset(&verts[nverts], c[4], c[5], q.s1, q.t1); nverts++; + } + } + + // TODO: add back-end bit to do this just once per frame. 
+ nvg__flushTextTexture(ctx); + + nvg__renderText(ctx, verts, nverts); + + return iter.nextx / scale; +} + +void nvgTextBox(NVGcontext* ctx, float x, float y, float breakRowWidth, const char* string, const char* end) +{ + NVGstate* state = nvg__getState(ctx); + NVGtextRow rows[2]; + int nrows = 0, i; + int oldAlign = state->textAlign; + int haling = state->textAlign & (NVG_ALIGN_LEFT | NVG_ALIGN_CENTER | NVG_ALIGN_RIGHT); + int valign = state->textAlign & (NVG_ALIGN_TOP | NVG_ALIGN_MIDDLE | NVG_ALIGN_BOTTOM | NVG_ALIGN_BASELINE); + float lineh = 0; + + if (state->fontId == FONS_INVALID) return; + + nvgTextMetrics(ctx, NULL, NULL, &lineh); + + state->textAlign = NVG_ALIGN_LEFT | valign; + + while ((nrows = nvgTextBreakLines(ctx, string, end, breakRowWidth, rows, 2))) { + for (i = 0; i < nrows; i++) { + NVGtextRow* row = &rows[i]; + if (haling & NVG_ALIGN_LEFT) + nvgText(ctx, x, y, row->start, row->end); + else if (haling & NVG_ALIGN_CENTER) + nvgText(ctx, x + breakRowWidth*0.5f - row->width*0.5f, y, row->start, row->end); + else if (haling & NVG_ALIGN_RIGHT) + nvgText(ctx, x + breakRowWidth - row->width, y, row->start, row->end); + y += lineh * state->lineHeight; + } + string = rows[nrows-1].next; + } + + state->textAlign = oldAlign; +} + +int nvgTextGlyphPositions(NVGcontext* ctx, float x, float y, const char* string, const char* end, NVGglyphPosition* positions, int maxPositions) +{ + NVGstate* state = nvg__getState(ctx); + float scale = nvg__getFontScale(state) * ctx->devicePxRatio; + float invscale = 1.0f / scale; + FONStextIter iter, prevIter; + FONSquad q; + int npos = 0; + + if (state->fontId == FONS_INVALID) return 0; + + if (end == NULL) + end = string + strlen(string); + + if (string == end) + return 0; + + fonsSetSize(ctx->fs, state->fontSize*scale); + fonsSetSpacing(ctx->fs, state->letterSpacing*scale); + fonsSetBlur(ctx->fs, state->fontBlur*scale); + fonsSetAlign(ctx->fs, state->textAlign); + fonsSetFont(ctx->fs, state->fontId); + + fonsTextIterInit(ctx->fs, &iter, x*scale, y*scale, string, end, FONS_GLYPH_BITMAP_OPTIONAL); + prevIter = iter; + while (fonsTextIterNext(ctx->fs, &iter, &q)) { + if (iter.prevGlyphIndex < 0 && nvg__allocTextAtlas(ctx)) { // can not retrieve glyph? 
+ iter = prevIter; + fonsTextIterNext(ctx->fs, &iter, &q); // try again + } + prevIter = iter; + positions[npos].str = iter.str; + positions[npos].x = iter.x * invscale; + positions[npos].minx = nvg__minf(iter.x, q.x0) * invscale; + positions[npos].maxx = nvg__maxf(iter.nextx, q.x1) * invscale; + npos++; + if (npos >= maxPositions) + break; + } + + return npos; +} + +enum NVGcodepointType { + NVG_SPACE, + NVG_NEWLINE, + NVG_CHAR, + NVG_CJK_CHAR, +}; + +int nvgTextBreakLines(NVGcontext* ctx, const char* string, const char* end, float breakRowWidth, NVGtextRow* rows, int maxRows) +{ + NVGstate* state = nvg__getState(ctx); + float scale = nvg__getFontScale(state) * ctx->devicePxRatio; + float invscale = 1.0f / scale; + FONStextIter iter, prevIter; + FONSquad q; + int nrows = 0; + float rowStartX = 0; + float rowWidth = 0; + float rowMinX = 0; + float rowMaxX = 0; + const char* rowStart = NULL; + const char* rowEnd = NULL; + const char* wordStart = NULL; + float wordStartX = 0; + float wordMinX = 0; + const char* breakEnd = NULL; + float breakWidth = 0; + float breakMaxX = 0; + int type = NVG_SPACE, ptype = NVG_SPACE; + unsigned int pcodepoint = 0; + + if (maxRows == 0) return 0; + if (state->fontId == FONS_INVALID) return 0; + + if (end == NULL) + end = string + strlen(string); + + if (string == end) return 0; + + fonsSetSize(ctx->fs, state->fontSize*scale); + fonsSetSpacing(ctx->fs, state->letterSpacing*scale); + fonsSetBlur(ctx->fs, state->fontBlur*scale); + fonsSetAlign(ctx->fs, state->textAlign); + fonsSetFont(ctx->fs, state->fontId); + + breakRowWidth *= scale; + + fonsTextIterInit(ctx->fs, &iter, 0, 0, string, end, FONS_GLYPH_BITMAP_OPTIONAL); + prevIter = iter; + while (fonsTextIterNext(ctx->fs, &iter, &q)) { + if (iter.prevGlyphIndex < 0 && nvg__allocTextAtlas(ctx)) { // can not retrieve glyph? + iter = prevIter; + fonsTextIterNext(ctx->fs, &iter, &q); // try again + } + prevIter = iter; + switch (iter.codepoint) { + case 9: // \t + case 11: // \v + case 12: // \f + case 32: // space + case 0x00a0: // NBSP + type = NVG_SPACE; + break; + case 10: // \n + type = pcodepoint == 13 ? NVG_SPACE : NVG_NEWLINE; + break; + case 13: // \r + type = pcodepoint == 10 ? NVG_SPACE : NVG_NEWLINE; + break; + case 0x0085: // NEL + type = NVG_NEWLINE; + break; + default: + if ((iter.codepoint >= 0x4E00 && iter.codepoint <= 0x9FFF) || + (iter.codepoint >= 0x3000 && iter.codepoint <= 0x30FF) || + (iter.codepoint >= 0xFF00 && iter.codepoint <= 0xFFEF) || + (iter.codepoint >= 0x1100 && iter.codepoint <= 0x11FF) || + (iter.codepoint >= 0x3130 && iter.codepoint <= 0x318F) || + (iter.codepoint >= 0xAC00 && iter.codepoint <= 0xD7AF)) + type = NVG_CJK_CHAR; + else + type = NVG_CHAR; + break; + } + + if (type == NVG_NEWLINE) { + // Always handle new lines. + rows[nrows].start = rowStart != NULL ? rowStart : iter.str; + rows[nrows].end = rowEnd != NULL ? rowEnd : iter.str; + rows[nrows].width = rowWidth * invscale; + rows[nrows].minx = rowMinX * invscale; + rows[nrows].maxx = rowMaxX * invscale; + rows[nrows].next = iter.next; + nrows++; + if (nrows >= maxRows) + return nrows; + // Set null break point + breakEnd = rowStart; + breakWidth = 0.0; + breakMaxX = 0.0; + // Indicate to skip the white space at the beginning of the row. 
+ rowStart = NULL; + rowEnd = NULL; + rowWidth = 0; + rowMinX = rowMaxX = 0; + } else { + if (rowStart == NULL) { + // Skip white space until the beginning of the line + if (type == NVG_CHAR || type == NVG_CJK_CHAR) { + // The current char is the row so far + rowStartX = iter.x; + rowStart = iter.str; + rowEnd = iter.next; + rowWidth = iter.nextx - rowStartX; + rowMinX = q.x0 - rowStartX; + rowMaxX = q.x1 - rowStartX; + wordStart = iter.str; + wordStartX = iter.x; + wordMinX = q.x0 - rowStartX; + // Set null break point + breakEnd = rowStart; + breakWidth = 0.0; + breakMaxX = 0.0; + } + } else { + float nextWidth = iter.nextx - rowStartX; + + // track last non-white space character + if (type == NVG_CHAR || type == NVG_CJK_CHAR) { + rowEnd = iter.next; + rowWidth = iter.nextx - rowStartX; + rowMaxX = q.x1 - rowStartX; + } + // track last end of a word + if (((ptype == NVG_CHAR || ptype == NVG_CJK_CHAR) && type == NVG_SPACE) || type == NVG_CJK_CHAR) { + breakEnd = iter.str; + breakWidth = rowWidth; + breakMaxX = rowMaxX; + } + // track last beginning of a word + if ((ptype == NVG_SPACE && (type == NVG_CHAR || type == NVG_CJK_CHAR)) || type == NVG_CJK_CHAR) { + wordStart = iter.str; + wordStartX = iter.x; + wordMinX = q.x0; + } + + // Break to new line when a character is beyond break width. + if ((type == NVG_CHAR || type == NVG_CJK_CHAR) && nextWidth > breakRowWidth) { + // The run length is too long, need to break to new line. + if (breakEnd == rowStart) { + // The current word is longer than the row length, just break it from here. + rows[nrows].start = rowStart; + rows[nrows].end = iter.str; + rows[nrows].width = rowWidth * invscale; + rows[nrows].minx = rowMinX * invscale; + rows[nrows].maxx = rowMaxX * invscale; + rows[nrows].next = iter.str; + nrows++; + if (nrows >= maxRows) + return nrows; + rowStartX = iter.x; + rowStart = iter.str; + rowEnd = iter.next; + rowWidth = iter.nextx - rowStartX; + rowMinX = q.x0 - rowStartX; + rowMaxX = q.x1 - rowStartX; + wordStart = iter.str; + wordStartX = iter.x; + wordMinX = q.x0 - rowStartX; + } else { + // Break the line from the end of the last word, and start new line from the beginning of the new. + rows[nrows].start = rowStart; + rows[nrows].end = breakEnd; + rows[nrows].width = breakWidth * invscale; + rows[nrows].minx = rowMinX * invscale; + rows[nrows].maxx = breakMaxX * invscale; + rows[nrows].next = wordStart; + nrows++; + if (nrows >= maxRows) + return nrows; + // Update row + rowStartX = wordStartX; + rowStart = wordStart; + rowEnd = iter.next; + rowWidth = iter.nextx - rowStartX; + rowMinX = wordMinX - rowStartX; + rowMaxX = q.x1 - rowStartX; + } + // Set null break point + breakEnd = rowStart; + breakWidth = 0.0; + breakMaxX = 0.0; + } + } + } + + pcodepoint = iter.codepoint; + ptype = type; + } + + // Break the line from the end of the last word, and start new line from the beginning of the new. 
+ if (rowStart != NULL) { + rows[nrows].start = rowStart; + rows[nrows].end = rowEnd; + rows[nrows].width = rowWidth * invscale; + rows[nrows].minx = rowMinX * invscale; + rows[nrows].maxx = rowMaxX * invscale; + rows[nrows].next = end; + nrows++; + } + + return nrows; +} + +float nvgTextBounds(NVGcontext* ctx, float x, float y, const char* string, const char* end, float* bounds) +{ + NVGstate* state = nvg__getState(ctx); + float scale = nvg__getFontScale(state) * ctx->devicePxRatio; + float invscale = 1.0f / scale; + float width; + + if (state->fontId == FONS_INVALID) return 0; + + fonsSetSize(ctx->fs, state->fontSize*scale); + fonsSetSpacing(ctx->fs, state->letterSpacing*scale); + fonsSetBlur(ctx->fs, state->fontBlur*scale); + fonsSetAlign(ctx->fs, state->textAlign); + fonsSetFont(ctx->fs, state->fontId); + + width = fonsTextBounds(ctx->fs, x*scale, y*scale, string, end, bounds); + if (bounds != NULL) { + // Use line bounds for height. + fonsLineBounds(ctx->fs, y*scale, &bounds[1], &bounds[3]); + bounds[0] *= invscale; + bounds[1] *= invscale; + bounds[2] *= invscale; + bounds[3] *= invscale; + } + return width * invscale; +} + +void nvgTextBoxBounds(NVGcontext* ctx, float x, float y, float breakRowWidth, const char* string, const char* end, float* bounds) +{ + NVGstate* state = nvg__getState(ctx); + NVGtextRow rows[2]; + float scale = nvg__getFontScale(state) * ctx->devicePxRatio; + float invscale = 1.0f / scale; + int nrows = 0, i; + int oldAlign = state->textAlign; + int haling = state->textAlign & (NVG_ALIGN_LEFT | NVG_ALIGN_CENTER | NVG_ALIGN_RIGHT); + int valign = state->textAlign & (NVG_ALIGN_TOP | NVG_ALIGN_MIDDLE | NVG_ALIGN_BOTTOM | NVG_ALIGN_BASELINE); + float lineh = 0, rminy = 0, rmaxy = 0; + float minx, miny, maxx, maxy; + + if (state->fontId == FONS_INVALID) { + if (bounds != NULL) + bounds[0] = bounds[1] = bounds[2] = bounds[3] = 0.0f; + return; + } + + nvgTextMetrics(ctx, NULL, NULL, &lineh); + + state->textAlign = NVG_ALIGN_LEFT | valign; + + minx = maxx = x; + miny = maxy = y; + + fonsSetSize(ctx->fs, state->fontSize*scale); + fonsSetSpacing(ctx->fs, state->letterSpacing*scale); + fonsSetBlur(ctx->fs, state->fontBlur*scale); + fonsSetAlign(ctx->fs, state->textAlign); + fonsSetFont(ctx->fs, state->fontId); + fonsLineBounds(ctx->fs, 0, &rminy, &rmaxy); + rminy *= invscale; + rmaxy *= invscale; + + while ((nrows = nvgTextBreakLines(ctx, string, end, breakRowWidth, rows, 2))) { + for (i = 0; i < nrows; i++) { + NVGtextRow* row = &rows[i]; + float rminx, rmaxx, dx = 0; + // Horizontal bounds + if (haling & NVG_ALIGN_LEFT) + dx = 0; + else if (haling & NVG_ALIGN_CENTER) + dx = breakRowWidth*0.5f - row->width*0.5f; + else if (haling & NVG_ALIGN_RIGHT) + dx = breakRowWidth - row->width; + rminx = x + row->minx + dx; + rmaxx = x + row->maxx + dx; + minx = nvg__minf(minx, rminx); + maxx = nvg__maxf(maxx, rmaxx); + // Vertical bounds. 
+ miny = nvg__minf(miny, y + rminy); + maxy = nvg__maxf(maxy, y + rmaxy); + + y += lineh * state->lineHeight; + } + string = rows[nrows-1].next; + } + + state->textAlign = oldAlign; + + if (bounds != NULL) { + bounds[0] = minx; + bounds[1] = miny; + bounds[2] = maxx; + bounds[3] = maxy; + } +} + +void nvgTextMetrics(NVGcontext* ctx, float* ascender, float* descender, float* lineh) +{ + NVGstate* state = nvg__getState(ctx); + float scale = nvg__getFontScale(state) * ctx->devicePxRatio; + float invscale = 1.0f / scale; + + if (state->fontId == FONS_INVALID) return; + + fonsSetSize(ctx->fs, state->fontSize*scale); + fonsSetSpacing(ctx->fs, state->letterSpacing*scale); + fonsSetBlur(ctx->fs, state->fontBlur*scale); + fonsSetAlign(ctx->fs, state->textAlign); + fonsSetFont(ctx->fs, state->fontId); + + fonsVertMetrics(ctx->fs, ascender, descender, lineh); + if (ascender != NULL) + *ascender *= invscale; + if (descender != NULL) + *descender *= invscale; + if (lineh != NULL) + *lineh *= invscale; +} +// vim: ft=c nu noet ts=4 diff --git a/troposphere/daybreak/source/ams_su.c b/troposphere/daybreak/source/ams_su.c new file mode 100644 index 000000000..a5ef3282b --- /dev/null +++ b/troposphere/daybreak/source/ams_su.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include +#include +#include "ams_su.h" +#include "service_guard.h" + +static Service g_amssuSrv; +static TransferMemory g_tmem; + +NX_GENERATE_SERVICE_GUARD(amssu); + +Result _amssuInitialize(void) { + return smGetService(&g_amssuSrv, "ams:su"); +} + +void _amssuCleanup(void) { + serviceClose(&g_amssuSrv); + tmemClose(&g_tmem); +} + +Service *amssuGetServiceSession(void) { + return &g_amssuSrv; +} + +Result amssuGetUpdateInformation(AmsSuUpdateInformation *out, const char *path) { + char send_path[FS_MAX_PATH] = {0}; + strncpy(send_path, path, FS_MAX_PATH-1); + send_path[FS_MAX_PATH-1] = 0; + + return serviceDispatchOut(&g_amssuSrv, 0, *out, + .buffer_attrs = { SfBufferAttr_In | SfBufferAttr_HipcPointer | SfBufferAttr_FixedSize }, + .buffers = { { send_path, FS_MAX_PATH } }, + ); +} + +Result amssuValidateUpdate(AmsSuUpdateValidationInfo *out, const char *path) { + char send_path[FS_MAX_PATH] = {0}; + strncpy(send_path, path, FS_MAX_PATH-1); + send_path[FS_MAX_PATH-1] = 0; + + return serviceDispatchOut(&g_amssuSrv, 1, *out, + .buffer_attrs = { SfBufferAttr_In | SfBufferAttr_HipcPointer | SfBufferAttr_FixedSize }, + .buffers = { { send_path, FS_MAX_PATH } }, + ); +} + +Result amssuSetupUpdate(void *buffer, size_t size, const char *path, bool exfat) { + Result rc = 0; + + if (buffer == NULL) { + rc = tmemCreate(&g_tmem, size, Perm_None); + } else { + rc = tmemCreateFromMemory(&g_tmem, buffer, size, Perm_None); + } + if (R_FAILED(rc)) return rc; + + char send_path[FS_MAX_PATH] = {0}; + strncpy(send_path, path, FS_MAX_PATH-1); + send_path[FS_MAX_PATH-1] = 0; + + const struct { + u8 exfat; + u64 size; + } in = { exfat, g_tmem.size }; + + rc = serviceDispatchIn(&g_amssuSrv, 2, in, + .in_num_handles = 1, + .in_handles = { g_tmem.handle }, + .buffer_attrs = { SfBufferAttr_In | SfBufferAttr_HipcPointer | SfBufferAttr_FixedSize }, + .buffers = { { send_path, FS_MAX_PATH } }, + ); + if (R_FAILED((rc))) { + tmemClose(&g_tmem); + } + + return rc; +} + +Result amssuSetupUpdateWithVariation(void *buffer, size_t size, const char *path, bool exfat, u32 variation) { + Result rc = 0; + + if (buffer == NULL) { + rc = tmemCreate(&g_tmem, size, Perm_None); + } else { + rc = tmemCreateFromMemory(&g_tmem, buffer, size, Perm_None); + } + if (R_FAILED(rc)) return rc; + + char send_path[FS_MAX_PATH] = {0}; + strncpy(send_path, path, FS_MAX_PATH-1); + send_path[FS_MAX_PATH-1] = 0; + + const struct { + u8 exfat; + u32 variation; + u64 size; + } in = { exfat, variation, g_tmem.size }; + + rc = serviceDispatchIn(&g_amssuSrv, 3, in, + .in_num_handles = 1, + .in_handles = { g_tmem.handle }, + .buffer_attrs = { SfBufferAttr_In | SfBufferAttr_HipcPointer | SfBufferAttr_FixedSize }, + .buffers = { { send_path, FS_MAX_PATH } }, + ); + if (R_FAILED((rc))) { + tmemClose(&g_tmem); + } + + return rc; +} + +Result amssuRequestPrepareUpdate(AsyncResult *a) { + memset(a, 0, sizeof(*a)); + + Handle event = INVALID_HANDLE; + Result rc = serviceDispatch(&g_amssuSrv, 4, + .out_num_objects = 1, + .out_objects = &a->s, + .out_handle_attrs = { SfOutHandleAttr_HipcCopy }, + .out_handles = &event, + ); + + if (R_SUCCEEDED(rc)) + eventLoadRemote(&a->event, event, false); + + return rc; +} + +Result amssuGetPrepareUpdateProgress(NsSystemUpdateProgress *out) { + return serviceDispatchOut(&g_amssuSrv, 5, *out); +} + +Result amssuHasPreparedUpdate(bool *out) { + u8 outval = 0; + Result rc = serviceDispatchOut(&g_amssuSrv, 6, outval); + if (R_SUCCEEDED(rc)) { + if (out) *out = outval & 1; + } + return rc; +} + +Result 
amssuApplyPreparedUpdate() { + return serviceDispatch(&g_amssuSrv, 7); +} \ No newline at end of file diff --git a/troposphere/daybreak/source/ams_su.h b/troposphere/daybreak/source/ams_su.h new file mode 100644 index 000000000..35ef62146 --- /dev/null +++ b/troposphere/daybreak/source/ams_su.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once +#include <switch.h> + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + u32 version; + bool exfat_supported; + u32 num_firmware_variations; + u32 firmware_variation_ids[16]; +} AmsSuUpdateInformation; + +typedef struct { + Result result; + Result exfat_result; + NcmContentMetaKey invalid_key; + NcmContentId invalid_content_id; +} AmsSuUpdateValidationInfo; + +Result amssuInitialize(); +void amssuExit(); +Service *amssuGetServiceSession(void); + +Result amssuGetUpdateInformation(AmsSuUpdateInformation *out, const char *path); +Result amssuValidateUpdate(AmsSuUpdateValidationInfo *out, const char *path); +Result amssuSetupUpdate(void *buffer, size_t size, const char *path, bool exfat); +Result amssuSetupUpdateWithVariation(void *buffer, size_t size, const char *path, bool exfat, u32 variation); +Result amssuRequestPrepareUpdate(AsyncResult *a); +Result amssuGetPrepareUpdateProgress(NsSystemUpdateProgress *out); +Result amssuHasPreparedUpdate(bool *out); +Result amssuApplyPreparedUpdate(); + +#ifdef __cplusplus +} +#endif \ No newline at end of file diff --git a/troposphere/daybreak/source/assert.hpp b/troposphere/daybreak/source/assert.hpp new file mode 100644 index 000000000..1b8ed4d5f --- /dev/null +++ b/troposphere/daybreak/source/assert.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2020 Adubbz + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#pragma once + +#include <cstdlib> + +#define DBK_ABORT_UNLESS(expr) \ + if (!static_cast<bool>(expr)) { \ + std::abort(); \ + } diff --git a/troposphere/daybreak/source/main.cpp b/troposphere/daybreak/source/main.cpp new file mode 100644 index 000000000..0eb1d04bc --- /dev/null +++ b/troposphere/daybreak/source/main.cpp @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2020 Adubbz + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include +#include +#include "ui.hpp" +#include "ams_su.h" + +extern "C" { + + void userAppInit(void) { + Result rc = 0; + + if (R_FAILED(rc = romfsInit())) { + fatalThrow(rc); + } + + if (R_FAILED(rc = spsmInitialize())) { + fatalThrow(rc); + } + + if (R_FAILED(rc = plInitialize(PlServiceType_User))) { + fatalThrow(rc); + } + + if (R_FAILED(rc = splInitialize())) { + fatalThrow(rc); + } + + if (R_FAILED(rc = nsInitialize())) { + fatalThrow(rc); + } + + if (R_FAILED(rc = hiddbgInitialize())) { + fatalThrow(rc); + } + + } + + void userAppExit(void) { + hiddbgExit(); + nsExit(); + splExit(); + plExit(); + spsmExit(); + romfsExit(); + amssuExit(); + } + +} + +namespace { + + static constexpr u32 FramebufferWidth = 1280; + static constexpr u32 FramebufferHeight = 720; + +} + +class Daybreak : public CApplication { + private: + static constexpr unsigned NumFramebuffers = 2; + static constexpr unsigned StaticCmdSize = 0x1000; + + dk::UniqueDevice m_device; + dk::UniqueQueue m_queue; + dk::UniqueSwapchain m_swapchain; + + std::optional m_pool_images; + std::optional m_pool_code; + std::optional m_pool_data; + + dk::UniqueCmdBuf m_cmd_buf; + DkCmdList m_render_cmdlist; + + dk::Image m_depth_buffer; + CMemPool::Handle m_depth_buffer_mem; + dk::Image m_framebuffers[NumFramebuffers]; + CMemPool::Handle m_framebuffers_mem[NumFramebuffers]; + DkCmdList m_framebuffer_cmdlists[NumFramebuffers]; + + std::optional m_renderer; + NVGcontext *m_vg; + int m_standard_font; + public: + Daybreak() { + Result rc = 0; + + /* Create the deko3d device. */ + m_device = dk::DeviceMaker{}.create(); + + /* Create the main queue. */ + m_queue = dk::QueueMaker{m_device}.setFlags(DkQueueFlags_Graphics).create(); + + /* Create the memory pools. */ + m_pool_images.emplace(m_device, DkMemBlockFlags_GpuCached | DkMemBlockFlags_Image, 16*1024*1024); + m_pool_code.emplace(m_device, DkMemBlockFlags_CpuUncached | DkMemBlockFlags_GpuCached | DkMemBlockFlags_Code, 128*1024); + m_pool_data.emplace(m_device, DkMemBlockFlags_CpuUncached | DkMemBlockFlags_GpuCached, 1*1024*1024); + + /* Create the static command buffer and feed it freshly allocated memory. */ + m_cmd_buf = dk::CmdBufMaker{m_device}.create(); + CMemPool::Handle cmdmem = m_pool_data->allocate(StaticCmdSize); + m_cmd_buf.addMemory(cmdmem.getMemBlock(), cmdmem.getOffset(), cmdmem.getSize()); + + /* Create the framebuffer resources. */ + this->CreateFramebufferResources(); + + m_renderer.emplace(FramebufferWidth, FramebufferHeight, m_device, m_queue, *m_pool_images, *m_pool_code, *m_pool_data); + m_vg = nvgCreateDk(&*m_renderer, NVG_ANTIALIAS | NVG_STENCIL_STROKES); + + + PlFontData font; + if (R_FAILED(rc = plGetSharedFontByType(&font, PlSharedFontType_Standard))) { + fatalThrow(rc); + } + + m_standard_font = nvgCreateFontMem(m_vg, "switch-standard", static_cast(font.address), font.size, 0); + } + + ~Daybreak() { + /* Destroy the framebuffer resources. This should be done first. */ + this->DestroyFramebufferResources(); + + /* Cleanup vg. */ + nvgDeleteDk(m_vg); + + /* Destroy the renderer. 
*/ + m_renderer.reset(); + } + private: + void CreateFramebufferResources() { + /* Create layout for the depth buffer. */ + dk::ImageLayout layout_depth_buffer; + dk::ImageLayoutMaker{m_device} + .setFlags(DkImageFlags_UsageRender | DkImageFlags_HwCompression) + .setFormat(DkImageFormat_S8) + .setDimensions(FramebufferWidth, FramebufferHeight) + .initialize(layout_depth_buffer); + + /* Create the depth buffer. */ + m_depth_buffer_mem = m_pool_images->allocate(layout_depth_buffer.getSize(), layout_depth_buffer.getAlignment()); + m_depth_buffer.initialize(layout_depth_buffer, m_depth_buffer_mem.getMemBlock(), m_depth_buffer_mem.getOffset()); + + /* Create layout for the framebuffers. */ + dk::ImageLayout layout_framebuffer; + dk::ImageLayoutMaker{m_device} + .setFlags(DkImageFlags_UsageRender | DkImageFlags_UsagePresent | DkImageFlags_HwCompression) + .setFormat(DkImageFormat_RGBA8_Unorm) + .setDimensions(FramebufferWidth, FramebufferHeight) + .initialize(layout_framebuffer); + + /* Create the framebuffers. */ + std::array fb_array; + const u64 fb_size = layout_framebuffer.getSize(); + const u32 fb_align = layout_framebuffer.getAlignment(); + + for (unsigned int i = 0; i < NumFramebuffers; i++) { + /* Allocate a framebuffer. */ + m_framebuffers_mem[i] = m_pool_images->allocate(fb_size, fb_align); + m_framebuffers[i].initialize(layout_framebuffer, m_framebuffers_mem[i].getMemBlock(), m_framebuffers_mem[i].getOffset()); + + /* Generate a command list that binds it. */ + dk::ImageView color_target{ m_framebuffers[i] }, depth_target{ m_depth_buffer }; + m_cmd_buf.bindRenderTargets(&color_target, &depth_target); + m_framebuffer_cmdlists[i] = m_cmd_buf.finishList(); + + /* Fill in the array for use later by the swapchain creation code. */ + fb_array[i] = &m_framebuffers[i]; + } + + /* Create the swapchain using the framebuffers. */ + m_swapchain = dk::SwapchainMaker{m_device, nwindowGetDefault(), fb_array}.create(); + + /* Generate the main rendering cmdlist. */ + this->RecordStaticCommands(); + } + + void DestroyFramebufferResources() { + /* Return early if we have nothing to destroy. */ + if (!m_swapchain) return; + + /* Make sure the queue is idle before destroying anything. */ + m_queue.waitIdle(); + + /* Clear the static cmdbuf, destroying the static cmdlists in the process. */ + m_cmd_buf.clear(); + + /* Destroy the swapchain. */ + m_swapchain.destroy(); + + /* Destroy the framebuffers. */ + for (unsigned int i = 0; i < NumFramebuffers; i ++) { + m_framebuffers_mem[i].destroy(); + } + + /* Destroy the depth buffer. */ + m_depth_buffer_mem.destroy(); + } + + void RecordStaticCommands() { + /* Initialize state structs with deko3d defaults. */ + dk::RasterizerState rasterizer_state; + dk::ColorState color_state; + dk::ColorWriteState color_write_state; + + /* Configure the viewport and scissor. */ + m_cmd_buf.setViewports(0, { { 0.0f, 0.0f, FramebufferWidth, FramebufferHeight, 0.0f, 1.0f } }); + m_cmd_buf.setScissors(0, { { 0, 0, FramebufferWidth, FramebufferHeight } }); + + /* Clear the color and depth buffers. */ + m_cmd_buf.clearColor(0, DkColorMask_RGBA, 0.f, 0.f, 0.f, 1.0f); + m_cmd_buf.clearDepthStencil(true, 1.0f, 0xFF, 0); + + /* Bind required state. */ + m_cmd_buf.bindRasterizerState(rasterizer_state); + m_cmd_buf.bindColorState(color_state); + m_cmd_buf.bindColorWriteState(color_write_state); + + m_render_cmdlist = m_cmd_buf.finishList(); + } + + void Render(u64 ns) { + /* Acquire a framebuffer from the swapchain (and wait for it to be available). 
*/ + int slot = m_queue.acquireImage(m_swapchain); + + /* Run the command list that attaches said framebuffer to the queue. */ + m_queue.submitCommands(m_framebuffer_cmdlists[slot]); + + /* Run the main rendering command list. */ + m_queue.submitCommands(m_render_cmdlist); + + nvgBeginFrame(m_vg, FramebufferWidth, FramebufferHeight, 1.0f); + dbk::RenderMenu(m_vg, ns); + nvgEndFrame(m_vg); + + /* Now that we are done rendering, present it to the screen. */ + m_queue.presentImage(m_swapchain, slot); + } + + public: + bool onFrame(u64 ns) override { + dbk::UpdateMenu(ns); + this->Render(ns); + return !dbk::IsExitRequested(); + } +}; + +int main(int argc, char **argv) { + /* Initialize the menu. */ + dbk::InitializeMenu(FramebufferWidth, FramebufferHeight); + + Daybreak daybreak; + daybreak.run(); + return 0; +} diff --git a/troposphere/daybreak/source/service_guard.h b/troposphere/daybreak/source/service_guard.h new file mode 100644 index 000000000..5fbc5fca9 --- /dev/null +++ b/troposphere/daybreak/source/service_guard.h @@ -0,0 +1,56 @@ +#pragma once +#include +#include +#include +#include +#include + +typedef struct ServiceGuard { + Mutex mutex; + u32 refCount; +} ServiceGuard; + +NX_INLINE bool serviceGuardBeginInit(ServiceGuard* g) +{ + mutexLock(&g->mutex); + return (g->refCount++) == 0; +} + +NX_INLINE Result serviceGuardEndInit(ServiceGuard* g, Result rc, void (*cleanupFunc)(void)) +{ + if (R_FAILED(rc)) { + cleanupFunc(); + --g->refCount; + } + mutexUnlock(&g->mutex); + return rc; +} + +NX_INLINE void serviceGuardExit(ServiceGuard* g, void (*cleanupFunc)(void)) +{ + mutexLock(&g->mutex); + if (g->refCount && (--g->refCount) == 0) + cleanupFunc(); + mutexUnlock(&g->mutex); +} + +#define NX_GENERATE_SERVICE_GUARD_PARAMS(name, _paramdecl, _parampass) \ +\ +static ServiceGuard g_##name##Guard; \ +NX_INLINE Result _##name##Initialize _paramdecl; \ +static void _##name##Cleanup(void); \ +\ +Result name##Initialize _paramdecl \ +{ \ + Result rc = 0; \ + if (serviceGuardBeginInit(&g_##name##Guard)) \ + rc = _##name##Initialize _parampass; \ + return serviceGuardEndInit(&g_##name##Guard, rc, _##name##Cleanup); \ +} \ +\ +void name##Exit(void) \ +{ \ + serviceGuardExit(&g_##name##Guard, _##name##Cleanup); \ +} + +#define NX_GENERATE_SERVICE_GUARD(name) NX_GENERATE_SERVICE_GUARD_PARAMS(name, (void), ()) \ No newline at end of file diff --git a/troposphere/daybreak/source/ui.cpp b/troposphere/daybreak/source/ui.cpp new file mode 100644 index 000000000..dfbd4151e --- /dev/null +++ b/troposphere/daybreak/source/ui.cpp @@ -0,0 +1,1265 @@ +/* + * Copyright (c) 2020 Adubbz + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include +#include +#include +#include +#include "ui.hpp" +#include "ui_util.hpp" +#include "assert.hpp" + +namespace dbk { + + namespace { + + static constexpr u32 ExosphereApiVersionConfigItem = 65000; + static constexpr u32 ExosphereHasRcmBugPatch = 65004; + static constexpr u32 ExosphereEmummcType = 65007; + + /* Insets of content within windows. 
*/ + static constexpr float HorizontalInset = 20.0f; + static constexpr float BottomInset = 20.0f; + + /* Insets of content within text areas. */ + static constexpr float TextHorizontalInset = 8.0f; + static constexpr float TextVerticalInset = 8.0f; + + static constexpr float ButtonHeight = 60.0f; + static constexpr float ButtonHorizontalGap = 10.0f; + + static constexpr float VerticalGap = 10.0f; + + + u32 g_screen_width; + u32 g_screen_height; + + std::shared_ptr g_current_menu; + bool g_initialized = false; + bool g_exit_requested = false; + + u32 g_prev_touch_count = -1; + touchPosition g_start_touch_position; + bool g_started_touching = false; + bool g_tapping = false; + bool g_touches_moving = false; + bool g_finished_touching = false; + + /* Update install state. */ + char g_update_path[FS_MAX_PATH]; + bool g_reset_to_factory = false; + bool g_exfat_supported = false; + bool g_use_exfat = false; + + constexpr u32 MaxTapMovement = 20; + + void UpdateInput() { + /* Update the previous touch count. */ + g_prev_touch_count = hidTouchCount(); + + /* Scan for input and update touch state. */ + hidScanInput(); + const u32 touch_count = hidTouchCount(); + + if (g_prev_touch_count == 0 && touch_count > 0) { + hidTouchRead(&g_start_touch_position, 0); + g_started_touching = true; + g_tapping = true; + } else { + g_started_touching = false; + } + + if (g_prev_touch_count > 0 && touch_count == 0) { + g_finished_touching = true; + g_tapping = false; + } else { + g_finished_touching = false; + } + + /* Check if currently moving. */ + if (g_prev_touch_count > 0 && touch_count > 0) { + touchPosition current_touch_position; + hidTouchRead(¤t_touch_position, 0); + + if ((abs(current_touch_position.px - g_start_touch_position.px) > MaxTapMovement || abs(current_touch_position.py - g_start_touch_position.py) > MaxTapMovement)) { + g_touches_moving = true; + g_tapping = false; + } else { + g_touches_moving = false; + } + } else { + g_touches_moving = false; + } + } + + void ChangeMenu(std::shared_ptr menu) { + g_current_menu = menu; + } + + void ReturnToPreviousMenu() { + /* Go to the previous menu if there is one. */ + if (g_current_menu->GetPrevMenu() != nullptr) { + g_current_menu = g_current_menu->GetPrevMenu(); + } + } + + Result IsPathBottomLevel(const char *path, bool *out) { + Result rc = 0; + FsFileSystem *fs; + char translated_path[FS_MAX_PATH] = {}; + DBK_ABORT_UNLESS(fsdevTranslatePath(path, &fs, translated_path) != -1); + + FsDir dir; + if (R_FAILED(rc = fsFsOpenDirectory(fs, translated_path, FsDirOpenMode_ReadDirs, &dir))) { + return rc; + } + + s64 entry_count; + if (R_FAILED(rc = fsDirGetEntryCount(&dir, &entry_count))) { + return rc; + } + + *out = entry_count == 0; + fsDirClose(&dir); + return rc; + } + + } + + void Menu::AddButton(u32 id, const char *text, float x, float y, float w, float h) { + DBK_ABORT_UNLESS(id < MaxButtons); + Button button = { + .id = id, + .selected = false, + .enabled = true, + .x = x, + .y = y, + .w = w, + .h = h, + }; + + strncpy(button.text, text, sizeof(button.text)-1); + m_buttons[id] = button; + } + + void Menu::SetButtonSelected(u32 id, bool selected) { + DBK_ABORT_UNLESS(id < MaxButtons); + auto &button = m_buttons[id]; + + if (button) { + button->selected = selected; + } + } + + void Menu::DeselectAllButtons() { + for (auto &button : m_buttons) { + /* Ensure button is present. 
*/ + if (!button) { + continue; + } + button->selected = false; + } + } + + void Menu::SetButtonEnabled(u32 id, bool enabled) { + DBK_ABORT_UNLESS(id < MaxButtons); + auto &button = m_buttons[id]; + button->enabled = enabled; + } + + Button *Menu::GetButton(u32 id) { + DBK_ABORT_UNLESS(id < MaxButtons); + return !m_buttons[id] ? nullptr : &(*m_buttons[id]); + } + + Button *Menu::GetSelectedButton() { + for (auto &button : m_buttons) { + if (button && button->enabled && button->selected) { + return &(*button); + } + } + + return nullptr; + } + + Button *Menu::GetClosestButtonToSelection(Direction direction) { + const Button *selected_button = this->GetSelectedButton(); + + if (selected_button == nullptr || direction == Direction::Invalid) { + return nullptr; + } + + Button *closest_button = nullptr; + float closest_distance = 0.0f; + + for (auto &button : m_buttons) { + /* Skip absent button. */ + if (!button || !button->enabled) { + continue; + } + + /* Skip buttons that are in the wrong direction. */ + if ((direction == Direction::Down && button->y <= selected_button->y) || + (direction == Direction::Up && button->y >= selected_button->y) || + (direction == Direction::Right && button->x <= selected_button->x) || + (direction == Direction::Left && button->x >= selected_button->x)) { + continue; + } + + const float x_dist = button->x - selected_button->x; + const float y_dist = button->y - selected_button->y; + const float sq_dist = x_dist * x_dist + y_dist * y_dist; + + /* If we don't already have a closest button, set it. */ + if (closest_button == nullptr) { + closest_button = &(*button); + closest_distance = sq_dist; + continue; + } + + /* Update the closest button if this one is closer. */ + if (sq_dist < closest_distance) { + closest_button = &(*button); + closest_distance = sq_dist; + } + } + + return closest_button; + } + + Button *Menu::GetTouchedButton() { + touchPosition touch; + const u32 touch_count = hidTouchCount(); + + for (u32 i = 0; i < touch_count && g_started_touching; i++) { + hidTouchRead(&touch, i); + + for (auto &button : m_buttons) { + if (button && button->enabled && button->IsPositionInBounds(touch.px, touch.py)) { + return &(*button); + } + } + } + + return nullptr; + } + + Button *Menu::GetActivatedButton() { + Button *selected_button = this->GetSelectedButton(); + + if (selected_button == nullptr) { + return nullptr; + } + + const u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + if (k_down & KEY_A || this->GetTouchedButton() == selected_button) { + return selected_button; + } + + return nullptr; + } + + void Menu::UpdateButtons() { + const u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + Direction direction = Direction::Invalid; + + if (k_down & KEY_DOWN) { + direction = Direction::Down; + } else if (k_down & KEY_UP) { + direction = Direction::Up; + } else if (k_down & KEY_LEFT) { + direction = Direction::Left; + } else if (k_down & KEY_RIGHT) { + direction = Direction::Right; + } + + /* Select the closest button. */ + if (const Button *closest_button = this->GetClosestButtonToSelection(direction); closest_button != nullptr) { + this->DeselectAllButtons(); + this->SetButtonSelected(closest_button->id, true); + } + + /* Select the touched button. */ + if (const Button *touched_button = this->GetTouchedButton(); touched_button != nullptr) { + this->DeselectAllButtons(); + this->SetButtonSelected(touched_button->id, true); + } + } + + void Menu::DrawButtons(NVGcontext *vg, u64 ns) { + for (auto &button : m_buttons) { + /* Ensure button is present. 
*/ + if (!button) { + continue; + } + + /* Set the button style. */ + auto style = ButtonStyle::StandardDisabled; + if (button->enabled) { + style = button->selected ? ButtonStyle::StandardSelected : ButtonStyle::Standard; + } + + DrawButton(vg, button->text, button->x, button->y, button->w, button->h, style, ns); + } + } + + void Menu::LogText(const char *format, ...) { + /* Create a temporary string. */ + char tmp[0x100]; + va_list args; + va_start(args, format); + vsnprintf(tmp, sizeof(tmp), format, args); + va_end(args); + + /* Append the text to the log buffer. */ + strncat(m_log_buffer, tmp, sizeof(m_log_buffer)-1); + } + + std::shared_ptr Menu::GetPrevMenu() { + return m_prev_menu; + } + + AlertMenu::AlertMenu(std::shared_ptr prev_menu, const char *text, const char *subtext, Result rc) : Menu(prev_menu), m_text{}, m_subtext{}, m_result_text{}, m_rc(rc){ + /* Copy the input text. */ + strncpy(m_text, text, sizeof(m_text)-1); + strncpy(m_subtext, subtext, sizeof(m_subtext)-1); + + /* Copy result text if there is a result. */ + if (R_FAILED(rc)) { + snprintf(m_result_text, sizeof(m_result_text), "Result: 0x%08x", rc); + } + } + + void AlertMenu::Draw(NVGcontext *vg, u64 ns) { + const float window_height = WindowHeight + (R_FAILED(m_rc) ? SubTextHeight : 0.0f); + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - window_height / 2.0f; + + DrawWindow(vg, m_text, x, y, WindowWidth, window_height); + DrawText(vg, x + HorizontalInset, y + TitleGap, WindowWidth - HorizontalInset * 2.0f, m_subtext); + + /* Draw the result if there is one. */ + if (R_FAILED(m_rc)) { + DrawText(vg, x + HorizontalInset, y + TitleGap + SubTextHeight, WindowWidth - HorizontalInset * 2.0f, m_result_text); + } + + this->DrawButtons(vg, ns); + } + + ErrorMenu::ErrorMenu(const char *text, const char *subtext, Result rc) : AlertMenu(nullptr, text, subtext, rc) { + const float window_height = WindowHeight + (R_FAILED(m_rc) ? SubTextHeight : 0.0f); + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - window_height / 2.0f; + const float button_y = y + TitleGap + SubTextHeight + VerticalGap * 2.0f + (R_FAILED(m_rc) ? SubTextHeight : 0.0f); + const float button_width = WindowWidth - HorizontalInset * 2.0f; + + /* Add buttons. */ + this->AddButton(ExitButtonId, "Exit", x + HorizontalInset, button_y, button_width, ButtonHeight); + this->SetButtonSelected(ExitButtonId, true); + } + + void ErrorMenu::Update(u64 ns) { + u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + /* Go back if B is pressed. */ + if (k_down & KEY_B) { + g_exit_requested = true; + return; + } + + /* Take action if a button has been activated. */ + if (const Button *activated_button = this->GetActivatedButton(); activated_button != nullptr) { + switch (activated_button->id) { + case ExitButtonId: + g_exit_requested = true; + break; + } + } + + this->UpdateButtons(); + + /* Fallback on selecting the exit button. */ + if (const Button *selected_button = this->GetSelectedButton(); k_down && selected_button == nullptr) { + this->SetButtonSelected(ExitButtonId, true); + } + } + + WarningMenu::WarningMenu(std::shared_ptr prev_menu, std::shared_ptr next_menu, const char *text, const char *subtext, Result rc) : AlertMenu(prev_menu, text, subtext, rc), m_next_menu(next_menu) { + const float window_height = WindowHeight + (R_FAILED(m_rc) ?
SubTextHeight : 0.0f); + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - window_height / 2.0f; + + const float button_y = y + TitleGap + SubTextHeight + VerticalGap * 2.0f + (R_FAILED(m_rc) ? SubTextHeight : 0.0f); + const float button_width = (WindowWidth - HorizontalInset * 2.0f) / 2.0f - ButtonHorizontalGap; + this->AddButton(BackButtonId, "Back", x + HorizontalInset, button_y, button_width, ButtonHeight); + this->AddButton(ContinueButtonId, "Continue", x + HorizontalInset + button_width + ButtonHorizontalGap, button_y, button_width, ButtonHeight); + this->SetButtonSelected(ContinueButtonId, true); + } + + void WarningMenu::Update(u64 ns) { + u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + /* Go back if B is pressed. */ + if (k_down & KEY_B) { + ReturnToPreviousMenu(); + return; + } + + /* Take action if a button has been activated. */ + if (const Button *activated_button = this->GetActivatedButton(); activated_button != nullptr) { + switch (activated_button->id) { + case BackButtonId: + ReturnToPreviousMenu(); + return; + case ContinueButtonId: + ChangeMenu(m_next_menu); + return; + } + } + + this->UpdateButtons(); + + /* Fallback on selecting the continue button. */ + if (const Button *selected_button = this->GetSelectedButton(); k_down && selected_button == nullptr) { + this->SetButtonSelected(ContinueButtonId, true); + } + } + + MainMenu::MainMenu() : Menu(nullptr) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + + this->AddButton(InstallButtonId, "Install", x + HorizontalInset, y + TitleGap, WindowWidth - HorizontalInset * 2, ButtonHeight); + this->AddButton(ExitButtonId, "Exit", x + HorizontalInset, y + TitleGap + ButtonHeight + VerticalGap, WindowWidth - HorizontalInset * 2, ButtonHeight); + this->SetButtonSelected(InstallButtonId, true); + } + + void MainMenu::Update(u64 ns) { + u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + if (k_down & KEY_B) { + g_exit_requested = true; + } + + /* Take action if a button has been activated. */ + if (const Button *activated_button = this->GetActivatedButton(); activated_button != nullptr) { + switch (activated_button->id) { + case InstallButtonId: + { + const auto file_menu = std::make_shared(g_current_menu, "/"); + + Result rc = 0; + u64 hardware_type; + u64 has_rcm_bug_patch; + u64 is_emummc; + + if (R_FAILED(rc = splGetConfig(SplConfigItem_HardwareType, &hardware_type))) { + ChangeMenu(std::make_shared("An error has occurred", "Failed to get hardware type.", rc)); + return; + } + + if (R_FAILED(rc = splGetConfig(static_cast(ExosphereHasRcmBugPatch), &has_rcm_bug_patch))) { + ChangeMenu(std::make_shared("An error has occurred", "Failed to check RCM bug status.", rc)); + return; + } + + if (R_FAILED(rc = splGetConfig(static_cast(ExosphereEmummcType), &is_emummc))) { + ChangeMenu(std::make_shared("An error has occurred", "Failed to check emuMMC status.", rc)); + return; + } + + /* Warn if we're working with a patched unit. */ + const bool is_erista = hardware_type == 0 || hardware_type == 1; + if (is_erista && has_rcm_bug_patch && !is_emummc) { + ChangeMenu(std::make_shared(g_current_menu, file_menu, "Warning: Patched unit detected", "You may burn fuses or render your switch inoperable.")); + } else { + ChangeMenu(file_menu); + } + + return; + } + case ExitButtonId: + g_exit_requested = true; + return; + } + } + + this->UpdateButtons(); + + /* Fallback on selecting the install button.
*/ + if (const Button *selected_button = this->GetSelectedButton(); k_down && selected_button == nullptr) { + this->SetButtonSelected(InstallButtonId, true); + } + } + + void MainMenu::Draw(NVGcontext *vg, u64 ns) { + DrawWindow(vg, "Daybreak", g_screen_width / 2.0f - WindowWidth / 2.0f, g_screen_height / 2.0f - WindowHeight / 2.0f, WindowWidth, WindowHeight); + this->DrawButtons(vg, ns); + } + + FileMenu::FileMenu(std::shared_ptr prev_menu, const char *root) : Menu(prev_menu), m_current_index(0), m_scroll_offset(0), m_touch_start_scroll_offset(0), m_touch_finalize_selection(false) { + Result rc = 0; + + strncpy(m_root, root, sizeof(m_root)-1); + + if (R_FAILED(rc = this->PopulateFileEntries())) { + fatalThrow(rc); + } + } + + Result FileMenu::PopulateFileEntries() { + /* Open the directory. */ + DIR *dir = opendir(m_root); + if (dir == nullptr) { + return fsdevGetLastResult(); + } + + /* Add file entries to the list. */ + struct dirent *ent; + while ((ent = readdir(dir)) != nullptr) { + if (ent->d_type == DT_DIR) { + FileEntry file_entry = {}; + strncpy(file_entry.name, ent->d_name, sizeof(file_entry.name)); + m_file_entries.push_back(file_entry); + } + } + + /* Close the directory. */ + closedir(dir); + + /* Sort the file entries. */ + std::sort(m_file_entries.begin(), m_file_entries.end(), [](const FileEntry &a, const FileEntry &b) { + return strncmp(a.name, b.name, sizeof(a.name)) < 0; + }); + + return 0; + } + + bool FileMenu::IsSelectionVisible() { + const float visible_start = m_scroll_offset; + const float visible_end = visible_start + FileListHeight; + const float entry_start = static_cast(m_current_index) * (FileRowHeight + FileRowGap); + const float entry_end = entry_start + (FileRowHeight + FileRowGap); + return entry_start >= visible_start && entry_end <= visible_end; + } + + void FileMenu::ScrollToSelection() { + const float visible_start = m_scroll_offset; + const float visible_end = visible_start + FileListHeight; + const float entry_start = static_cast(m_current_index) * (FileRowHeight + FileRowGap); + const float entry_end = entry_start + (FileRowHeight + FileRowGap); + + if (entry_end > visible_end) { + m_scroll_offset += entry_end - visible_end; + } else if (entry_end < visible_end) { + m_scroll_offset = entry_start; + } + } + + bool FileMenu::IsEntryTouched(u32 i) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + + touchPosition current_pos; + hidTouchRead(¤t_pos, 0); + + /* Check if the tap is within the x bounds. */ + if (current_pos.px >= x + TextBackgroundOffset + FileRowHorizontalInset && current_pos.px <= WindowWidth - (TextBackgroundOffset + FileRowHorizontalInset) * 2.0f) { + const float y_min = y + TitleGap + FileRowGap + i * (FileRowHeight + FileRowGap) - m_scroll_offset; + const float y_max = y_min + FileRowHeight; + + /* Check if the tap is within the y bounds. */ + if (current_pos.py >= y_min && current_pos.py <= y_max) { + return true; + } + } + + return false; + } + + void FileMenu::UpdateTouches() { + /* Setup values on initial touch. */ + if (g_started_touching) { + m_touch_start_scroll_offset = m_scroll_offset; + + /* We may potentially finalize the selection later if we start off touching it. */ + if (this->IsEntryTouched(m_current_index)) { + m_touch_finalize_selection = true; + } + } + + /* Scroll based on touch movement. 
*/ + if (g_touches_moving) { + touchPosition current_pos; + hidTouchRead(¤t_pos, 0); + + const int dist_y = current_pos.py - g_start_touch_position.py; + float new_scroll_offset = m_touch_start_scroll_offset - static_cast(dist_y); + float max_scroll = (FileRowHeight + FileRowGap) * static_cast(m_file_entries.size()) - FileListHeight; + + /* Don't allow scrolling if there is not enough elements. */ + if (max_scroll < 0.0f) { + max_scroll = 0.0f; + } + + /* Don't allow scrolling before the first element. */ + if (new_scroll_offset < 0.0f) { + new_scroll_offset = 0.0f; + } + + /* Don't allow scrolling past the last element. */ + if (new_scroll_offset > max_scroll) { + new_scroll_offset = max_scroll; + } + + m_scroll_offset = new_scroll_offset; + } + + /* Select any tapped entries. */ + if (g_tapping) { + for (u32 i = 0; i < m_file_entries.size(); i++) { + if (this->IsEntryTouched(i)) { + /* The current index is checked later. */ + if (i == m_current_index) { + continue; + } + + m_current_index = i; + + /* Don't finalize selection if we touch something else. */ + m_touch_finalize_selection = false; + break; + } + } + } + + /* Don't finalize selection if we aren't finished and we've either stopped tapping or are no longer touching the selection. */ + if (!g_finished_touching && (!g_tapping || !this->IsEntryTouched(m_current_index))) { + m_touch_finalize_selection = false; + } + + /* Finalize selection if the currently selected entry is touched for the second time. */ + if (g_finished_touching && m_touch_finalize_selection) { + this->FinalizeSelection(); + m_touch_finalize_selection = false; + } + } + + void FileMenu::FinalizeSelection() { + DBK_ABORT_UNLESS(m_current_index < m_file_entries.size()); + FileEntry &entry = m_file_entries[m_current_index]; + + /* Determine the selected path. */ + char current_path[FS_MAX_PATH] = {}; + const int path_len = snprintf(current_path, sizeof(current_path), "%s%s/", m_root, entry.name); + DBK_ABORT_UNLESS(path_len >= 0 && path_len < static_cast(sizeof(current_path))); + + /* Determine if the chosen path is the bottom level. */ + Result rc = 0; + bool bottom_level; + if (R_FAILED(rc = IsPathBottomLevel(current_path, &bottom_level))) { + fatalThrow(rc); + } + + /* Show exfat settings or the next file menu. */ + if (bottom_level) { + /* Set the update path. */ + snprintf(g_update_path, sizeof(g_update_path), "%s", current_path); + + /* Change the menu. */ + ChangeMenu(std::make_shared(g_current_menu)); + } else { + ChangeMenu(std::make_shared(g_current_menu, current_path)); + } + } + + void FileMenu::Update(u64 ns) { + u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + /* Go back if B is pressed. */ + if (k_down & KEY_B) { + ReturnToPreviousMenu(); + return; + } + + /* Finalize selection on pressing A. */ + if (k_down & KEY_A) { + this->FinalizeSelection(); + } + + /* Update touch input. */ + this->UpdateTouches(); + + const u32 prev_index = m_current_index; + + if (k_down & KEY_DOWN) { + /* Scroll down. */ + if (m_current_index >= (m_file_entries.size() - 1)) { + m_current_index = 0; + } else { + m_current_index++; + } + } else if (k_down & KEY_UP) { + /* Scroll up. */ + if (m_current_index == 0) { + m_current_index = m_file_entries.size() - 1; + } else { + m_current_index--; + } + } + + /* Scroll to the selection if it isn't visible. 
*/ + if (prev_index != m_current_index && !this->IsSelectionVisible()) { + this->ScrollToSelection(); + } + } + + void FileMenu::Draw(NVGcontext *vg, u64 ns) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + + DrawWindow(vg, "Select an update directory", x, y, WindowWidth, WindowHeight); + DrawTextBackground(vg, x + TextBackgroundOffset, y + TitleGap, WindowWidth - TextBackgroundOffset * 2.0f, (FileRowHeight + FileRowGap) * MaxFileRows + FileRowGap); + + nvgSave(vg); + nvgScissor(vg, x + TextBackgroundOffset, y + TitleGap, WindowWidth - TextBackgroundOffset * 2.0f, (FileRowHeight + FileRowGap) * MaxFileRows + FileRowGap); + + for (u32 i = 0; i < m_file_entries.size(); i++) { + FileEntry &entry = m_file_entries[i]; + auto style = ButtonStyle::FileSelect; + + if (i == m_current_index) { + style = ButtonStyle::FileSelectSelected; + } + + DrawButton(vg, entry.name, x + TextBackgroundOffset + FileRowHorizontalInset, y + TitleGap + FileRowGap + i * (FileRowHeight + FileRowGap) - m_scroll_offset, WindowWidth - (TextBackgroundOffset + FileRowHorizontalInset) * 2.0f, FileRowHeight, style, ns); + } + + nvgRestore(vg); + } + + ValidateUpdateMenu::ValidateUpdateMenu(std::shared_ptr prev_menu) : Menu(prev_menu), m_has_drawn(false), m_has_info(false), m_has_validated(false) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + const float button_width = (WindowWidth - HorizontalInset * 2.0f) / 2.0f - ButtonHorizontalGap; + + /* Add buttons. */ + this->AddButton(BackButtonId, "Back", x + HorizontalInset, y + WindowHeight - BottomInset - ButtonHeight, button_width, ButtonHeight); + this->AddButton(ContinueButtonId, "Continue", x + HorizontalInset + button_width + ButtonHorizontalGap, y + WindowHeight - BottomInset - ButtonHeight, button_width, ButtonHeight); + this->SetButtonEnabled(BackButtonId, false); + this->SetButtonEnabled(ContinueButtonId, false); + + /* Obtain update information. */ + if (R_FAILED(this->GetUpdateInformation())) { + this->SetButtonEnabled(BackButtonId, true); + this->SetButtonSelected(BackButtonId, true); + } else { + /* Log this early so it is printed out before validation causes stalling. */ + this->LogText("Validating update, this may take a moment...\n"); + } + } + + Result ValidateUpdateMenu::GetUpdateInformation() { + Result rc = 0; + this->LogText("Directory %s\n", g_update_path); + + /* Attempt to get the update information. */ + if (R_FAILED(rc = amssuGetUpdateInformation(&m_update_info, g_update_path))) { + if (rc == 0x1a405) { + this->LogText("No update found in folder.\nEnsure your ncas are named correctly!\nResult: 0x%08x\n", rc); + } else { + this->LogText("Failed to get update information.\nResult: 0x%08x\n", rc); + } + return rc; + } + + /* Print update information. */ + this->LogText("- Version: %d.%d.%d\n", (m_update_info.version >> 26) & 0x1f, (m_update_info.version >> 20) & 0x1f, (m_update_info.version >> 16) & 0xf); + if (m_update_info.exfat_supported) { + this->LogText("- exFAT: Supported\n"); + } else { + this->LogText("- exFAT: Unsupported\n"); + } + this->LogText("- Firmware variations: %d\n", m_update_info.num_firmware_variations); + + /* Mark as having obtained update info. */ + m_has_info = true; + return rc; + } + + void ValidateUpdateMenu::ValidateUpdate() { + Result rc = 0; + + /* Validate the update. 
*/ + if (R_FAILED(rc = amssuValidateUpdate(&m_validation_info, g_update_path))) { + this->LogText("Failed to validate update.\nResult: 0x%08x\n", rc); + return; + } + + /* Check the result. */ + if (R_SUCCEEDED(m_validation_info.result)) { + this->LogText("Update is valid!\n"); + + if (R_FAILED(m_validation_info.exfat_result)) { + const u32 version = m_validation_info.invalid_key.version; + this->LogText("exFAT Validation failed with result: 0x%08x\n", m_validation_info.exfat_result); + this->LogText("Missing content:\n- Program id: %016lx\n- Version: %d.%d.%d\n", m_validation_info.invalid_key.id, (version >> 26) & 0x1f, (version >> 20) & 0x1f, (version >> 16) & 0xf); + + /* Log the missing content id. */ + this->LogText("- Content id: "); + for (size_t i = 0; i < sizeof(NcmContentId); i++) { + this->LogText("%02x", m_validation_info.invalid_content_id.c[i]); + } + this->LogText("\n"); + } + + /* Enable the back and continue buttons and select the continue button. */ + this->SetButtonEnabled(BackButtonId, true); + this->SetButtonEnabled(ContinueButtonId, true); + this->SetButtonSelected(ContinueButtonId, true); + } else { + /* Log the missing content info. */ + const u32 version = m_validation_info.invalid_key.version; + this->LogText("Validation failed with result: 0x%08x\n", m_validation_info.result); + this->LogText("Missing content:\n- Program id: %016lx\n- Version: %d.%d.%d\n", m_validation_info.invalid_key.id, (version >> 26) & 0x1f, (version >> 20) & 0x1f, (version >> 16) & 0xf); + + /* Log the missing content id. */ + this->LogText("- Content id: "); + for (size_t i = 0; i < sizeof(NcmContentId); i++) { + this->LogText("%02x", m_validation_info.invalid_content_id.c[i]); + } + this->LogText("\n"); + + /* Enable the back button and select it. */ + this->SetButtonEnabled(BackButtonId, true); + this->SetButtonSelected(BackButtonId, true); + } + + /* Mark validation as being complete. */ + m_has_validated = true; + } + + void ValidateUpdateMenu::Update(u64 ns) { + /* Perform validation if it hasn't been done already. */ + if (m_has_info && m_has_drawn && !m_has_validated) { + this->ValidateUpdate(); + } + + u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + /* Go back if B is pressed. */ + if (k_down & KEY_B) { + ReturnToPreviousMenu(); + return; + } + + /* Take action if a button has been activated. */ + if (const Button *activated_button = this->GetActivatedButton(); activated_button != nullptr) { + switch (activated_button->id) { + case BackButtonId: + ReturnToPreviousMenu(); + return; + case ContinueButtonId: + /* Don't continue if validation hasn't been done or has failed. */ + if (!m_has_validated || R_FAILED(m_validation_info.result)) { + break; + } + + /* Check if exfat is supported. */ + g_exfat_supported = m_update_info.exfat_supported && R_SUCCEEDED(m_validation_info.exfat_result); + if (!g_exfat_supported) { + g_use_exfat = false; + } + + /* Warn the user if they're updating with exFAT supposed to be supported but not present/corrupted. 
*/ + if (m_update_info.exfat_supported && R_FAILED(m_validation_info.exfat_result)) { + ChangeMenu(std::make_shared(g_current_menu, std::make_shared(g_current_menu), "Warning: exFAT firmware is missing or corrupt", "Are you sure you want to proceed?")); + } else { + ChangeMenu(std::make_shared(g_current_menu)); + } + + return; + } + } + + this->UpdateButtons(); + } + + void ValidateUpdateMenu::Draw(NVGcontext *vg, u64 ns) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + + DrawWindow(vg, "Update information", x, y, WindowWidth, WindowHeight); + DrawTextBackground(vg, x + HorizontalInset, y + TitleGap, WindowWidth - HorizontalInset * 2.0f, TextAreaHeight); + DrawTextBlock(vg, m_log_buffer, x + HorizontalInset + TextHorizontalInset, y + TitleGap + TextVerticalInset, WindowWidth - (HorizontalInset + TextHorizontalInset) * 2.0f, TextAreaHeight - TextVerticalInset * 2.0f); + + this->DrawButtons(vg, ns); + m_has_drawn = true; + } + + ChooseResetMenu::ChooseResetMenu(std::shared_ptr prev_menu) : Menu(prev_menu) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + const float button_width = (WindowWidth - HorizontalInset * 2.0f) / 2.0f - ButtonHorizontalGap; + + /* Add buttons. */ + this->AddButton(ResetToFactorySettingsButtonId, "Reset to factory settings", x + HorizontalInset, y + TitleGap, button_width, ButtonHeight); + this->AddButton(PreserveSettingsButtonId, "Preserve settings", x + HorizontalInset + button_width + ButtonHorizontalGap, y + TitleGap, button_width, ButtonHeight); + this->SetButtonSelected(PreserveSettingsButtonId, true); + } + + void ChooseResetMenu::Update(u64 ns) { + u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + /* Go back if B is pressed. */ + if (k_down & KEY_B) { + ReturnToPreviousMenu(); + return; + } + + /* Take action if a button has been activated. */ + if (const Button *activated_button = this->GetActivatedButton(); activated_button != nullptr) { + switch (activated_button->id) { + case ResetToFactorySettingsButtonId: + g_reset_to_factory = true; + break; + case PreserveSettingsButtonId: + g_reset_to_factory = false; + break; + } + + if (g_exfat_supported) { + ChangeMenu(std::make_shared(g_current_menu)); + } else { + ChangeMenu(std::make_shared(g_current_menu, std::make_shared(g_current_menu), "Ready to begin update installation", "Are you sure you want to proceed?")); + } + } + + this->UpdateButtons(); + + /* Fallback on selecting the preserve settings button. */ + if (const Button *selected_button = this->GetSelectedButton(); k_down && selected_button == nullptr) { + this->SetButtonSelected(PreserveSettingsButtonId, true); + } + } + + void ChooseResetMenu::Draw(NVGcontext *vg, u64 ns) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + + DrawWindow(vg, "Select settings mode", x, y, WindowWidth, WindowHeight); + this->DrawButtons(vg, ns); + } + + ChooseExfatMenu::ChooseExfatMenu(std::shared_ptr prev_menu) : Menu(prev_menu) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + const float button_width = (WindowWidth - HorizontalInset * 2.0f) / 2.0f - ButtonHorizontalGap; + + /* Add buttons.
*/ + this->AddButton(Fat32ButtonId, "Install (FAT32)", x + HorizontalInset, y + TitleGap, button_width, ButtonHeight); + this->AddButton(ExFatButtonId, "Install (FAT32 + exFAT)", x + HorizontalInset + button_width + ButtonHorizontalGap, y + TitleGap, button_width, ButtonHeight); + + /* Set the default selected button based on the user's current install. We aren't particularly concerned if fsIsExFatSupported fails. */ + bool exfat_supported = false; + fsIsExFatSupported(&exfat_supported); + + if (exfat_supported) { + this->SetButtonSelected(ExFatButtonId, true); + } else { + this->SetButtonSelected(Fat32ButtonId, true); + } + } + + void ChooseExfatMenu::Update(u64 ns) { + u64 k_down = hidKeysDown(CONTROLLER_P1_AUTO); + + /* Go back if B is pressed. */ + if (k_down & KEY_B) { + ReturnToPreviousMenu(); + return; + } + + /* Take action if a button has been activated. */ + if (const Button *activated_button = this->GetActivatedButton(); activated_button != nullptr) { + switch (activated_button->id) { + case Fat32ButtonId: + g_use_exfat = false; + break; + case ExFatButtonId: + g_use_exfat = true; + break; + } + + ChangeMenu(std::make_shared(g_current_menu, std::make_shared(g_current_menu), "Ready to begin update installation", "Are you sure you want to proceed?")); + } + + this->UpdateButtons(); + + /* Fallback on selecting the exfat button. */ + if (const Button *selected_button = this->GetSelectedButton(); k_down && selected_button == nullptr) { + this->SetButtonSelected(ExFatButtonId, true); + } + } + + void ChooseExfatMenu::Draw(NVGcontext *vg, u64 ns) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + + DrawWindow(vg, "Select driver variant", x, y, WindowWidth, WindowHeight); + this->DrawButtons(vg, ns); + } + + InstallUpdateMenu::InstallUpdateMenu(std::shared_ptr prev_menu) : Menu(prev_menu), m_install_state(InstallState::NeedsDraw), m_progress_percent(0.0f) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + const float button_width = (WindowWidth - HorizontalInset * 2.0f) / 2.0f - ButtonHorizontalGap; + + /* Add buttons. */ + this->AddButton(ShutdownButtonId, "Shutdown", x + HorizontalInset, y + WindowHeight - BottomInset - ButtonHeight, button_width, ButtonHeight); + this->AddButton(RebootButtonId, "Reboot", x + HorizontalInset + button_width + ButtonHorizontalGap, y + WindowHeight - BottomInset - ButtonHeight, button_width, ButtonHeight); + this->SetButtonEnabled(ShutdownButtonId, false); + this->SetButtonEnabled(RebootButtonId, false); + + /* Prevent the home button from being pressed during installation. */ + hiddbgDeactivateHomeButton(); + } + + void InstallUpdateMenu::MarkForReboot() { + this->SetButtonEnabled(ShutdownButtonId, true); + this->SetButtonEnabled(RebootButtonId, true); + this->SetButtonSelected(RebootButtonId, true); + m_install_state = InstallState::AwaitingReboot; + } + + Result InstallUpdateMenu::TransitionUpdateState() { + Result rc = 0; + if (m_install_state == InstallState::NeedsSetup) { + /* Setup the update. */ + if (R_FAILED(rc = amssuSetupUpdate(nullptr, UpdateTaskBufferSize, g_update_path, g_use_exfat))) { + this->LogText("Failed to setup update.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } + + /* Log setup completion. 
*/ + this->LogText("Update setup complete.\n"); + m_install_state = InstallState::NeedsPrepare; + } else if (m_install_state == InstallState::NeedsPrepare) { + /* Request update preparation. */ + if (R_FAILED(rc = amssuRequestPrepareUpdate(&m_prepare_result))) { + this->LogText("Failed to request update preparation.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } + + /* Log awaiting prepare. */ + this->LogText("Preparing update...\n"); + m_install_state = InstallState::AwaitingPrepare; + } else if (m_install_state == InstallState::AwaitingPrepare) { + /* Check if preparation has a result. */ + if (R_FAILED(rc = asyncResultWait(&m_prepare_result, 0)) && rc != 0xea01) { + this->LogText("Failed to check update preparation result.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } else if (R_SUCCEEDED(rc)) { + if (R_FAILED(rc = asyncResultGet(&m_prepare_result))) { + this->LogText("Failed to prepare update.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } + } + + /* Check if the update has been prepared. */ + bool prepared; + if (R_FAILED(rc = amssuHasPreparedUpdate(&prepared))) { + this->LogText("Failed to check if update has been prepared.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } + + /* Mark for application if preparation complete. */ + if (prepared) { + this->LogText("Update preparation complete.\nApplying update...\n"); + m_install_state = InstallState::NeedsApply; + return rc; + } + + /* Check update progress. */ + NsSystemUpdateProgress update_progress = {}; + if (R_FAILED(rc = amssuGetPrepareUpdateProgress(&update_progress))) { + this->LogText("Failed to check update progress.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } + + /* Update progress percent. */ + if (update_progress.total_size > 0.0f) { + m_progress_percent = static_cast(update_progress.current_size) / static_cast(update_progress.total_size); + } else { + m_progress_percent = 0.0f; + } + } else if (m_install_state == InstallState::NeedsApply) { + /* Apply the prepared update. */ + if (R_FAILED(rc = amssuApplyPreparedUpdate())) { + this->LogText("Failed to apply update.\nResult: 0x%08x\n", rc); + } else { + /* Log success. */ + this->LogText("Update applied successfully.\n"); + + if (g_reset_to_factory) { + if (R_FAILED(rc = nsResetToFactorySettingsForRefurbishment())) { + /* Fallback on ResetToFactorySettings. */ + if (rc == MAKERESULT(Module_Libnx, LibnxError_IncompatSysVer)) { + if (R_FAILED(rc = nsResetToFactorySettings())) { + this->LogText("Failed to reset to factory settings.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } + } else { + this->LogText("Failed to reset to factory settings for refurbishment.\nResult: 0x%08x\n", rc); + this->MarkForReboot(); + return rc; + } + } + + this->LogText("Successfully reset to factory settings.\n", rc); + } + } + + this->MarkForReboot(); + return rc; + } + + return rc; + } + + void InstallUpdateMenu::Update(u64 ns) { + /* Transition to the next update state. */ + if (m_install_state != InstallState::NeedsDraw && m_install_state != InstallState::AwaitingReboot) { + this->TransitionUpdateState(); + } + + /* Take action if a button has been activated. 
*/ + if (const Button *activated_button = this->GetActivatedButton(); activated_button != nullptr) { + switch (activated_button->id) { + case ShutdownButtonId: + if (R_FAILED(appletRequestToShutdown())) { + spsmShutdown(false); + } + break; + case RebootButtonId: + if (R_FAILED(appletRequestToReboot())) { + spsmShutdown(true); + } + break; + } + } + + this->UpdateButtons(); + } + + void InstallUpdateMenu::Draw(NVGcontext *vg, u64 ns) { + const float x = g_screen_width / 2.0f - WindowWidth / 2.0f; + const float y = g_screen_height / 2.0f - WindowHeight / 2.0f; + + DrawWindow(vg, "Installing update", x, y, WindowWidth, WindowHeight); + DrawProgressText(vg, x + HorizontalInset, y + TitleGap, m_progress_percent); + DrawProgressBar(vg, x + HorizontalInset, y + TitleGap + ProgressTextHeight, WindowWidth - HorizontalInset * 2.0f, ProgressBarHeight, m_progress_percent); + DrawTextBackground(vg, x + HorizontalInset, y + TitleGap + ProgressTextHeight + ProgressBarHeight + VerticalGap, WindowWidth - HorizontalInset * 2.0f, TextAreaHeight); + DrawTextBlock(vg, m_log_buffer, x + HorizontalInset + TextHorizontalInset, y + TitleGap + ProgressTextHeight + ProgressBarHeight + VerticalGap + TextVerticalInset, WindowWidth - (HorizontalInset + TextHorizontalInset) * 2.0f, TextAreaHeight - TextVerticalInset * 2.0f); + + this->DrawButtons(vg, ns); + + /* We have drawn now, allow setup to occur. */ + if (m_install_state == InstallState::NeedsDraw) { + this->LogText("Beginning update setup...\n"); + m_install_state = InstallState::NeedsSetup; + } + } + + void InitializeMenu(u32 screen_width, u32 screen_height) { + Result rc = 0; + + /* Set the screen width and height. */ + g_screen_width = screen_width; + g_screen_height = screen_height; + + /* Mark as initialized. */ + g_initialized = true; + + /* Attempt to get the exosphere version. */ + u64 version; + if (R_FAILED(rc = splGetConfig(static_cast(ExosphereApiVersionConfigItem), &version))) { + ChangeMenu(std::make_shared("Atmosphere not found", "Daybreak requires Atmosphere to be installed.", rc)); + return; + } + + const u32 version_micro = (version >> 40) & 0xff; + const u32 version_minor = (version >> 48) & 0xff; + const u32 version_major = (version >> 56) & 0xff; + + /* Validate the exosphere version. */ + const bool ams_supports_sysupdate_api = version_major >= 0 && version_minor >= 14 && version_micro >= 0; + if (!ams_supports_sysupdate_api) { + ChangeMenu(std::make_shared("Outdated Atmosphere version", "Daybreak requires Atmosphere 0.14.0 or later.", rc)); + return; + } + + /* Initialize ams:su. */ + if (R_FAILED(rc = amssuInitialize())) { + fatalThrow(rc); + } + + /* Change the current menu to the main menu. */ + g_current_menu = std::make_shared(); + } + + void UpdateMenu(u64 ns) { + DBK_ABORT_UNLESS(g_initialized); + DBK_ABORT_UNLESS(g_current_menu != nullptr); + UpdateInput(); + g_current_menu->Update(ns); + } + + void RenderMenu(NVGcontext *vg, u64 ns) { + DBK_ABORT_UNLESS(g_initialized); + DBK_ABORT_UNLESS(g_current_menu != nullptr); + + /* Draw background. */ + DrawBackground(vg, g_screen_width, g_screen_height); + + /* Draw stars. 
*/ + DrawStar(vg, 40.0f, 64.0f, 3.0f); + DrawStar(vg, 110.0f, 300.0f, 3.0f); + DrawStar(vg, 200.0f, 150.0f, 4.0f); + DrawStar(vg, 370.0f, 280.0f, 3.0f); + DrawStar(vg, 450.0f, 40.0f, 3.5f); + DrawStar(vg, 710.0f, 90.0f, 3.0f); + DrawStar(vg, 900.0f, 240.0f, 3.0f); + DrawStar(vg, 970.0f, 64.0f, 4.0f); + DrawStar(vg, 1160.0f, 160.0f, 3.5f); + DrawStar(vg, 1210.0f, 350.0f, 3.0f); + + g_current_menu->Draw(vg, ns); + } + + bool IsExitRequested() { + return g_exit_requested; + } + +} diff --git a/troposphere/daybreak/source/ui.hpp b/troposphere/daybreak/source/ui.hpp new file mode 100644 index 000000000..481ad4069 --- /dev/null +++ b/troposphere/daybreak/source/ui.hpp @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2020 Adubbz + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include "ams_su.h" + +namespace dbk { + + struct Button { + static constexpr u32 InvalidButtonId = -1; + + u32 id; + bool selected; + bool enabled; + char text[256]; + float x; + float y; + float w; + float h; + + inline bool IsPositionInBounds(float x, float y) { + return x >= this->x && y >= this->y && x < (this->x + this->w) && y < (this->y + this->h); + } + }; + + enum class Direction { + Up, + Down, + Left, + Right, + Invalid, + }; + + class Menu { + protected: + static constexpr size_t MaxButtons = 32; + static constexpr size_t LogBufferSize = 0x1000; + protected: + std::array, MaxButtons> m_buttons; + const std::shared_ptr m_prev_menu; + char m_log_buffer[LogBufferSize]; + protected: + void AddButton(u32 id, const char *text, float x, float y, float w, float h); + void SetButtonSelected(u32 id, bool selected); + void DeselectAllButtons(); + void SetButtonEnabled(u32 id, bool enabled); + + Button *GetButton(u32 id); + Button *GetSelectedButton(); + Button *GetClosestButtonToSelection(Direction direction); + Button *GetTouchedButton(); + Button *GetActivatedButton(); + + void UpdateButtons(); + void DrawButtons(NVGcontext *vg, u64 ns); + + void LogText(const char *format, ...); + public: + Menu(std::shared_ptr prev_menu) : m_buttons({}), m_prev_menu(prev_menu), m_log_buffer{} { /* ... 
*/ } + + std::shared_ptr GetPrevMenu(); + virtual void Update(u64 ns) = 0; + virtual void Draw(NVGcontext *vg, u64 ns) = 0; + }; + + class AlertMenu : public Menu { + protected: + static constexpr float WindowWidth = 600.0f; + static constexpr float WindowHeight = 214.0f; + static constexpr float TitleGap = 90.0f; + static constexpr float SubTextHeight = 24.0f; + protected: + char m_text[0x100]; + char m_subtext[0x100]; + char m_result_text[0x20]; + Result m_rc; + public: + AlertMenu(std::shared_ptr prev_menu, const char *text, const char *subtext, Result rc = 0); + + virtual void Draw(NVGcontext *vg, u64 ns) override; + }; + + class ErrorMenu : public AlertMenu { + private: + static constexpr u32 ExitButtonId = 0; + public: + ErrorMenu(const char *text, const char *subtext, Result rc = 0); + + virtual void Update(u64 ns) override; + }; + + class WarningMenu : public AlertMenu { + private: + static constexpr u32 BackButtonId = 0; + static constexpr u32 ContinueButtonId = 1; + private: + const std::shared_ptr m_next_menu; + public: + WarningMenu(std::shared_ptr prev_menu, std::shared_ptr next_menu, const char *text, const char *subtext, Result rc = 0); + + virtual void Update(u64 ns) override; + }; + + class MainMenu : public Menu { + private: + static constexpr u32 InstallButtonId = 0; + static constexpr u32 ExitButtonId = 1; + + static constexpr float WindowWidth = 400.0f; + static constexpr float WindowHeight = 240.0f; + static constexpr float TitleGap = 90.0f; + public: + MainMenu(); + + virtual void Update(u64 ns) override; + virtual void Draw(NVGcontext *vg, u64 ns) override; + }; + + class FileMenu : public Menu { + private: + struct FileEntry { + char name[FS_MAX_PATH]; + }; + private: + static constexpr size_t MaxFileRows = 11; + + static constexpr float WindowWidth = 1200.0f; + static constexpr float WindowHeight = 680.0f; + static constexpr float TitleGap = 90.0f; + static constexpr float TextBackgroundOffset = 20.0f; + static constexpr float FileRowHeight = 40.0f; + static constexpr float FileRowGap = 10.0f; + static constexpr float FileRowHorizontalInset = 10.0f; + static constexpr float FileListHeight = MaxFileRows * (FileRowHeight + FileRowGap); + private: + char m_root[FS_MAX_PATH]; + std::vector m_file_entries; + u32 m_current_index; + float m_scroll_offset; + float m_touch_start_scroll_offset; + bool m_touch_finalize_selection; + + Result PopulateFileEntries(); + bool IsSelectionVisible(); + void ScrollToSelection(); + bool IsEntryTouched(u32 i); + void UpdateTouches(); + void FinalizeSelection(); + public: + FileMenu(std::shared_ptr prev_menu, const char *root); + + virtual void Update(u64 ns) override; + virtual void Draw(NVGcontext *vg, u64 ns) override; + }; + + class ValidateUpdateMenu : public Menu { + private: + static constexpr u32 BackButtonId = 0; + static constexpr u32 ContinueButtonId = 1; + + static constexpr float WindowWidth = 600.0f; + static constexpr float WindowHeight = 600.0f; + static constexpr float TitleGap = 90.0f; + static constexpr float TextAreaHeight = 410.0f; + private: + AmsSuUpdateInformation m_update_info; + AmsSuUpdateValidationInfo m_validation_info; + bool m_has_drawn; + bool m_has_info; + bool m_has_validated; + + Result GetUpdateInformation(); + void ValidateUpdate(); + public: + ValidateUpdateMenu(std::shared_ptr prev_menu); + + virtual void Update(u64 ns) override; + virtual void Draw(NVGcontext *vg, u64 ns) override; + }; + + class ChooseResetMenu : public Menu { + private: + static constexpr u32 ResetToFactorySettingsButtonId = 0; + 
static constexpr u32 PreserveSettingsButtonId = 1; + + static constexpr float WindowWidth = 600.0f; + static constexpr float WindowHeight = 170.0f; + static constexpr float TitleGap = 90.0f; + public: + ChooseResetMenu(std::shared_ptr prev_menu); + + virtual void Update(u64 ns) override; + virtual void Draw(NVGcontext *vg, u64 ns) override; + }; + + class ChooseExfatMenu : public Menu { + private: + static constexpr u32 Fat32ButtonId = 0; + static constexpr u32 ExFatButtonId = 1; + + static constexpr float WindowWidth = 600.0f; + static constexpr float WindowHeight = 170.0f; + static constexpr float TitleGap = 90.0f; + public: + ChooseExfatMenu(std::shared_ptr prev_menu); + + virtual void Update(u64 ns) override; + virtual void Draw(NVGcontext *vg, u64 ns) override; + }; + + class InstallUpdateMenu : public Menu { + private: + enum class InstallState { + NeedsDraw, + NeedsSetup, + NeedsPrepare, + AwaitingPrepare, + NeedsApply, + AwaitingReboot, + }; + private: + static constexpr u32 ShutdownButtonId = 0; + static constexpr u32 RebootButtonId = 1; + + static constexpr float WindowWidth = 600.0f; + static constexpr float WindowHeight = 600.0f; + static constexpr float TitleGap = 120.0f; + static constexpr float ProgressTextHeight = 20.0f; + static constexpr float ProgressBarHeight = 30.0f; + static constexpr float TextAreaHeight = 320.0f; + + static constexpr size_t UpdateTaskBufferSize = 0x100000; + private: + InstallState m_install_state; + AsyncResult m_prepare_result; + float m_progress_percent; + + void MarkForReboot(); + Result TransitionUpdateState(); + public: + InstallUpdateMenu(std::shared_ptr prev_menu); + + virtual void Update(u64 ns) override; + virtual void Draw(NVGcontext *vg, u64 ns) override; + }; + + void InitializeMenu(u32 screen_width, u32 screen_height); + void UpdateMenu(u64 ns); + void RenderMenu(NVGcontext *vg, u64 ns); + bool IsExitRequested(); + +} diff --git a/troposphere/daybreak/source/ui_util.cpp b/troposphere/daybreak/source/ui_util.cpp new file mode 100644 index 000000000..3ed6a66f8 --- /dev/null +++ b/troposphere/daybreak/source/ui_util.cpp @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2020 Adubbz + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include "ui_util.hpp" +#include +#include + +namespace dbk { + + namespace { + + constexpr const char *SwitchStandardFont = "switch-standard"; + constexpr float WindowCornerRadius = 20.0f; + constexpr float TextAreaCornerRadius = 10.0f; + constexpr float ButtonCornerRaidus = 3.0f; + + NVGcolor GetSelectionRGB2(u64 ns) { + /* Calculate the rgb values for the breathing colour effect. 
*/ + const double t = static_cast(ns) / 1'000'000'000.0d; + const float d = -0.5 * cos(3.0f*t) + 0.5f; + const int r2 = 83 + (float)(128 - 83) * (d * 0.7f + 0.3f); + const int g2 = 71 + (float)(126 - 71) * (d * 0.7f + 0.3f); + const int b2 = 185 + (float)(230 - 185) * (d * 0.7f + 0.3f); + return nvgRGB(r2, g2, b2); + } + + } + + void DrawStar(NVGcontext *vg, float x, float y, float width) { + nvgBeginPath(vg); + nvgEllipse(vg, x, y, width, width * 3.0f); + nvgEllipse(vg, x, y, width * 3.0f, width); + nvgFillColor(vg, nvgRGB(65, 71, 115)); + nvgFill(vg); + } + + void DrawBackground(NVGcontext *vg, float w, float h) { + /* Draw the background gradient. */ + const NVGpaint bg_paint = nvgLinearGradient(vg, w / 2.0f, 0, w / 2.0f, h + 20.0f, nvgRGB(20, 24, 50), nvgRGB(46, 57, 127)); + nvgBeginPath(vg); + nvgRect(vg, 0, 0, w, h); + nvgFillPaint(vg, bg_paint); + nvgFill(vg); + } + + void DrawWindow(NVGcontext *vg, const char *title, float x, float y, float w, float h) { + /* Draw the window background. */ + const NVGpaint window_bg_paint = nvgLinearGradient(vg, x + w / 2.0f, y, x + w / 2.0f, y + h + h / 4.0f, nvgRGB(255, 255, 255), nvgRGB(188, 214, 234)); + nvgBeginPath(vg); + nvgRoundedRect(vg, x, y, w, h, WindowCornerRadius); + nvgFillPaint(vg, window_bg_paint); + nvgFill(vg); + + /* Draw the shadow surrounding the window. */ + NVGpaint shadowPaint = nvgBoxGradient(vg, x, y + 2, w, h, WindowCornerRadius * 2, 10, nvgRGBA(0, 0, 0, 128), nvgRGBA(0, 0, 0, 0)); + nvgBeginPath(vg); + nvgRect(vg, x - 10, y - 10, w + 20, h + 30); + nvgRoundedRect(vg, x, y, w, h, WindowCornerRadius); + nvgPathWinding(vg, NVG_HOLE); + nvgFillPaint(vg, shadowPaint); + nvgFill(vg); + + /* Setup the font. */ + nvgFontSize(vg, 32.0f); + nvgFontFace(vg, SwitchStandardFont); + nvgTextAlign(vg, NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE); + nvgFillColor(vg, nvgRGB(0, 0, 0)); + + /* Draw the title. */ + const float tw = nvgTextBounds(vg, 0, 0, title, nullptr, nullptr); + nvgText(vg, x + w * 0.5f - tw * 0.5f, y + 40.0f, title, nullptr); + } + + void DrawButton(NVGcontext *vg, const char *text, float x, float y, float w, float h, ButtonStyle style, u64 ns) { + /* Fill the background if selected. */ + if (style == ButtonStyle::StandardSelected || style == ButtonStyle::FileSelectSelected) { + NVGpaint bg_paint = nvgLinearGradient(vg, x, y + h / 2.0f, x + w, y + h / 2.0f, nvgRGB(83, 71, 185), GetSelectionRGB2(ns)); + nvgBeginPath(vg); + nvgRoundedRect(vg, x, y, w, h, ButtonCornerRaidus); + nvgFillPaint(vg, bg_paint); + nvgFill(vg); + } + + /* Draw the shadow surrounding the button. */ + if (style == ButtonStyle::Standard || style == ButtonStyle::StandardSelected || style == ButtonStyle::StandardDisabled || style == ButtonStyle::FileSelectSelected) { + const unsigned char shadow_color = style == ButtonStyle::Standard ? 128 : 64; + NVGpaint shadow_paint = nvgBoxGradient(vg, x, y, w, h, ButtonCornerRaidus, 5, nvgRGBA(0, 0, 0, shadow_color), nvgRGBA(0, 0, 0, 0)); + nvgBeginPath(vg); + nvgRect(vg, x - 10, y - 10, w + 20, h + 30); + nvgRoundedRect(vg, x, y, w, h, ButtonCornerRaidus); + nvgPathWinding(vg, NVG_HOLE); + nvgFillPaint(vg, shadow_paint); + nvgFill(vg); + } + + /* Setup the font. */ + nvgFontSize(vg, 20.0f); + nvgFontFace(vg, SwitchStandardFont); + nvgTextAlign(vg, NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE); + + /* Set the text colour. 
+        if (style == ButtonStyle::StandardSelected || style == ButtonStyle::FileSelectSelected) {
+            nvgFillColor(vg, nvgRGB(255, 255, 255));
+        } else {
+            const unsigned char alpha = style == ButtonStyle::StandardDisabled ? 64 : 255;
+            nvgFillColor(vg, nvgRGBA(0, 0, 0, alpha));
+        }
+
+        /* Draw the button text. */
+        const float tw = nvgTextBounds(vg, 0, 0, text, nullptr, nullptr);
+
+        if (style == ButtonStyle::Standard || style == ButtonStyle::StandardSelected || style == ButtonStyle::StandardDisabled) {
+            nvgText(vg, x + w * 0.5f - tw * 0.5f, y + h * 0.5f, text, nullptr);
+        } else {
+            nvgText(vg, x + 10.0f, y + h * 0.5f, text, nullptr);
+        }
+    }
+
+    void DrawTextBackground(NVGcontext *vg, float x, float y, float w, float h) {
+        nvgBeginPath(vg);
+        nvgRoundedRect(vg, x, y, w, h, TextAreaCornerRadius);
+        nvgFillColor(vg, nvgRGBA(0, 0, 0, 16));
+        nvgFill(vg);
+    }
+
+    void DrawText(NVGcontext *vg, float x, float y, float w, const char *text) {
+        nvgFontSize(vg, 20.0f);
+        nvgFontFace(vg, SwitchStandardFont);
+        nvgTextAlign(vg, NVG_ALIGN_LEFT | NVG_ALIGN_TOP);
+        nvgFillColor(vg, nvgRGB(0, 0, 0));
+
+        const float tw = nvgTextBounds(vg, 0, 0, text, nullptr, nullptr);
+        nvgText(vg, x + w * 0.5f - tw * 0.5f, y, text, nullptr);
+    }
+
+    void DrawProgressText(NVGcontext *vg, float x, float y, float progress) {
+        char progress_text[32] = {};
+        snprintf(progress_text, sizeof(progress_text)-1, "%d%% complete", static_cast<int>(progress * 100.0f));
+
+        nvgFontSize(vg, 24.0f);
+        nvgFontFace(vg, SwitchStandardFont);
+        nvgTextAlign(vg, NVG_ALIGN_LEFT | NVG_ALIGN_MIDDLE);
+        nvgFillColor(vg, nvgRGB(0, 0, 0));
+        nvgText(vg, x, y, progress_text, nullptr);
+    }
+
+    void DrawProgressBar(NVGcontext *vg, float x, float y, float w, float h, float progress) {
+        /* Draw the progress bar background. */
+        nvgBeginPath(vg);
+        nvgRoundedRect(vg, x, y, w, h, WindowCornerRadius);
+        nvgFillColor(vg, nvgRGBA(0, 0, 0, 128));
+        nvgFill(vg);
+
+        /* Draw the progress bar fill. */
+        if (progress > 0.0f) {
+            NVGpaint progress_fill_paint = nvgLinearGradient(vg, x, y + 0.5f * h, x + w, y + 0.5f * h, nvgRGB(83, 71, 185), nvgRGB(128, 126, 230));
+            nvgBeginPath(vg);
+            nvgRoundedRect(vg, x, y, WindowCornerRadius + (w - WindowCornerRadius) * progress, h, WindowCornerRadius);
+            nvgFillPaint(vg, progress_fill_paint);
+            nvgFill(vg);
+        }
+    }
+
+    void DrawTextBlock(NVGcontext *vg, const char *text, float x, float y, float w, float h) {
+        /* Save state and scissor. */
+        nvgSave(vg);
+        nvgScissor(vg, x, y, w, h);
+
+        /* Configure the text. */
+        nvgFontSize(vg, 18.0f);
+        nvgFontFace(vg, SwitchStandardFont);
+        nvgTextLineHeight(vg, 1.3f);
+        nvgTextAlign(vg, NVG_ALIGN_LEFT | NVG_ALIGN_TOP);
+        nvgFillColor(vg, nvgRGB(0, 0, 0));
+
+        /* Determine the bounds of the text box. */
+        float bounds[4];
+        nvgTextBoxBounds(vg, 0, 0, w, text, nullptr, bounds);
+
+        /* Adjust the y to only show the last part of the text that fits. */
+        float y_adjustment = 0.0f;
+        if (bounds[3] > h) {
+            y_adjustment = bounds[3] - h;
+        }
+
+        /* Draw the text box and restore state. */
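+        /* Because the scissor above clips drawing to (x, y, w, h), shifting the draw position up by */
+        /* y_adjustment leaves only the last h pixels of the text visible, so the block behaves like a */
+        /* log view pinned to the most recent text. */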
+        nvgTextBox(vg, x, y - y_adjustment, w, text, nullptr);
+        nvgRestore(vg);
+    }
+
+}
diff --git a/troposphere/daybreak/source/ui_util.hpp b/troposphere/daybreak/source/ui_util.hpp
new file mode 100644
index 000000000..224f36c91
--- /dev/null
+++ b/troposphere/daybreak/source/ui_util.hpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2020 Adubbz
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+#include <nanovg.h>
+#include <switch.h>
+
+namespace dbk {
+
+    enum class ButtonStyle {
+        Standard,
+        StandardSelected,
+        StandardDisabled,
+        FileSelect,
+        FileSelectSelected,
+    };
+
+    void DrawStar(NVGcontext *vg, float x, float y, float width);
+    void DrawBackground(NVGcontext *vg, float w, float h);
+    void DrawWindow(NVGcontext *vg, const char *title, float x, float y, float w, float h);
+    void DrawButton(NVGcontext *vg, const char *text, float x, float y, float w, float h, ButtonStyle style, u64 ns);
+    void DrawTextBackground(NVGcontext *vg, float x, float y, float w, float h);
+    void DrawText(NVGcontext *vg, float x, float y, float w, const char *text);
+    void DrawProgressText(NVGcontext *vg, float x, float y, float progress);
+    void DrawProgressBar(NVGcontext *vg, float x, float y, float w, float h, float progress);
+    void DrawTextBlock(NVGcontext *vg, const char *text, float x, float y, float w, float h);
+
+}
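
For readers unfamiliar with how these helpers compose, the sketch below shows roughly how a screen like daybreak's install view might be drawn with them. It is illustrative only and not part of the patch: the function name, the constant values, and the layout are assumptions, while the real menus are the Menu subclasses declared in ui.hpp above.

#include "ui_util.hpp"

namespace dbk {

    /* Hypothetical layout constants standing in for a menu's real values. */
    constexpr float ExampleWindowWidth        = 600.0f;
    constexpr float ExampleWindowHeight       = 600.0f;
    constexpr float ExampleTitleGap           = 120.0f;
    constexpr float ExampleProgressTextHeight = 20.0f;
    constexpr float ExampleProgressBarHeight  = 30.0f;
    constexpr float ExampleHorizontalInset    = 20.0f;

    void DrawExampleInstallScreen(NVGcontext *vg, u64 ns, float screen_w, float screen_h, float progress, bool apply_selected) {
        /* Fill the screen, then centre a window on it. */
        DrawBackground(vg, screen_w, screen_h);
        const float x = screen_w / 2.0f - ExampleWindowWidth / 2.0f;
        const float y = screen_h / 2.0f - ExampleWindowHeight / 2.0f;
        DrawWindow(vg, "Installing update", x, y, ExampleWindowWidth, ExampleWindowHeight);

        /* progress is expected in [0, 1]; DrawProgressText converts it to a percentage itself. */
        DrawProgressText(vg, x + ExampleHorizontalInset, y + ExampleTitleGap, progress);
        DrawProgressBar(vg, x + ExampleHorizontalInset, y + ExampleTitleGap + ExampleProgressTextHeight, ExampleWindowWidth - ExampleHorizontalInset * 2.0f, ExampleProgressBarHeight, progress);

        /* A *Selected style makes the button pulse via the breathing gradient; ns is the same timestamp passed to Draw(). */
        DrawButton(vg, "Apply",  x + ExampleHorizontalInset, y + ExampleWindowHeight - 80.0f, 250.0f, 60.0f, apply_selected ? ButtonStyle::StandardSelected : ButtonStyle::Standard, ns);
        DrawButton(vg, "Cancel", x + ExampleWindowWidth - ExampleHorizontalInset - 250.0f, y + ExampleWindowHeight - 80.0f, 250.0f, 60.0f, apply_selected ? ButtonStyle::Standard : ButtonStyle::StandardSelected, ns);
    }

}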