diff --git a/rpcs3/Emu/SysCalls/Modules/cellFs.cpp b/rpcs3/Emu/SysCalls/Modules/cellFs.cpp
index 971dca28b9..9bbb275ea6 100644
--- a/rpcs3/Emu/SysCalls/Modules/cellFs.cpp
+++ b/rpcs3/Emu/SysCalls/Modules/cellFs.cpp
@@ -638,16 +638,18 @@ std::mutex g_fs_aio_mutex;
 
 using fs_aio_cb_t = vm::ptr<void(PPUThread& CPU, vm::ptr<CellFsAio> xaio, s32 error, s32 xid, u64 size)>;
 
-void fsAioRead(vm::ptr<CellFsAio> aio, s32 xid, fs_aio_cb_t func)
+void fsAio(vm::ptr<CellFsAio> aio, bool write, s32 xid, fs_aio_cb_t func)
 {
 	std::lock_guard<std::mutex> lock(g_fs_aio_mutex);
 
+	cellFs.Notice("FS AIO Request(%d): fd=0x%x, offset=0x%llx, buf=*0x%x, size=0x%llx, user_data=0x%llx", xid, aio->fd, aio->offset, aio->buf, aio->size, aio->user_data);
+
 	s32 error = CELL_OK;
-	u64 nread = 0;
+	u64 result = 0;
 
 	std::shared_ptr<fs_file_t> file;
 
-	if (!Emu.GetIdManager().GetIDData(aio->fd, file) || file->flags & CELL_FS_O_WRONLY)
+	if (!Emu.GetIdManager().GetIDData(aio->fd, file) || (!write && file->flags & CELL_FS_O_WRONLY) || (write && !(file->flags & CELL_FS_O_ACCMODE)))
 	{
 		error = CELL_FS_EBADF;
 	}
@@ -657,46 +659,15 @@ void fsAioRead(vm::ptr<CellFsAio> aio, s32 xid, fs_aio_cb_t func)
 
 		file->file->Seek(aio->offset);
 
-		nread = file->file->Read(aio->buf.get_ptr(), aio->size);
+		result = write ? file->file->Write(aio->buf.get_ptr(), aio->size) : file->file->Read(aio->buf.get_ptr(), aio->size);
 
 		file->file->Seek(old_position);
 	}
 
 	// should be executed directly by FS AIO thread
-	Emu.GetCallbackManager().Async([func, aio, error, xid, nread](PPUThread& CPU)
+	Emu.GetCallbackManager().Async([func, aio, error, xid, result](PPUThread& CPU)
 	{
-		func(CPU, aio, error, xid, nread);
-	});
-}
-
-void fsAioWrite(vm::ptr<CellFsAio> aio, s32 xid, fs_aio_cb_t func)
-{
-	std::lock_guard<std::mutex> lock(g_fs_aio_mutex);
-
-	s32 error = CELL_OK;
-	u64 nwritten = 0;
-
-	std::shared_ptr<fs_file_t> file;
-
-	if (!Emu.GetIdManager().GetIDData(aio->fd, file) || !(file->flags & CELL_FS_O_ACCMODE))
-	{
-		error = CELL_FS_EBADF;
-	}
-	else
-	{
-		const auto old_position = file->file->Tell();
-
-		file->file->Seek(aio->offset);
-
-		nwritten = file->file->Write(aio->buf.get_ptr(), aio->size);
-
-		file->file->Seek(old_position);
-	}
-
-	// should be executed directly by FS AIO thread
-	Emu.GetCallbackManager().Async([func, aio, error, xid, nwritten](PPUThread& CPU)
-	{
-		func(CPU, aio, error, xid, nwritten);
+		func(CPU, aio, error, xid, result);
 	});
 }
 
@@ -733,14 +704,14 @@ s32 cellFsAioRead(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)
 
 	// TODO: detect mount point and send AIO request to the AIO thread of this mount point
 
-	thread_t("FS AIO Read Thread", std::bind(fsAioRead, aio, (*id = ++g_fs_aio_id), func)).detach();
+	thread_t("FS AIO Read Thread", std::bind(fsAio, aio, false, (*id = ++g_fs_aio_id), func)).detach();
 
 	return CELL_OK;
 }
 
 s32 cellFsAioWrite(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)
 {
-	cellFs.Todo("cellFsAioWrite(aio=*0x%x, id=*0x%x, func=*0x%x)", aio, id, func);
+	cellFs.Warning("cellFsAioWrite(aio=*0x%x, id=*0x%x, func=*0x%x)", aio, id, func);
 
 	if (!Emu.GetIdManager().CheckID(aio->fd))
 	{
@@ -749,14 +720,14 @@ s32 cellFsAioWrite(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)
 
 	// TODO: detect mount point and send AIO request to the AIO thread of this mount point
 
-	thread_t("FS AIO Write Thread", std::bind(fsAioWrite, aio, (*id = ++g_fs_aio_id), func)).detach();
+	thread_t("FS AIO Write Thread", std::bind(fsAio, aio, true, (*id = ++g_fs_aio_id), func)).detach();
 
 	return CELL_OK;
 }
 
 s32 cellFsAioCancel(s32 id)
 {
-	cellFs.Todo("cellFsAioCancel(id=%d) -> CELL_FS_EINVAL", id);
+	cellFs.Warning("cellFsAioCancel(id=%d) -> CELL_FS_EINVAL", id);
 
 	// TODO: cancelled requests return CELL_FS_ECANCELED through their own callbacks
 
diff --git a/rpcs3/Loader/ELF64.cpp b/rpcs3/Loader/ELF64.cpp
index d4d6b6b01c..b75ace91b8 100644
--- a/rpcs3/Loader/ELF64.cpp
+++ b/rpcs3/Loader/ELF64.cpp
@@ -493,40 +493,59 @@ namespace loader
 			ppu_thr_stop_data[1] = BLR();
 			Emu.SetCPUThreadStop(ppu_thr_stop_data.addr());
 
-			/*
-			//TODO
-			static const int branch_size = 6 * 4;
+			static const int branch_size = 8 * 4;
+
 			auto make_branch = [](vm::ptr<u32>& ptr, u32 addr)
 			{
 				u32 stub = vm::read32(addr);
 				u32 rtoc = vm::read32(addr + 4);
 
-				*ptr++ = implicts::LI(r0, stub >> 16);
-				*ptr++ = ORIS(r0, r0, stub & 0xffff);
-				*ptr++ = implicts::LI(r2, rtoc >> 16);
-				*ptr++ = ORIS(r2, r2, rtoc & 0xffff);
+				*ptr++ = LI_(r0, 0);
+				*ptr++ = ORI(r0, r0, stub & 0xffff);
+				*ptr++ = ORIS(r0, r0, stub >> 16);
+				*ptr++ = LI_(r2, 0);
+				*ptr++ = ORI(r2, r2, rtoc & 0xffff);
+				*ptr++ = ORIS(r2, r2, rtoc >> 16);
 				*ptr++ = MTCTR(r0);
 				*ptr++ = BCTRL();
 			};
 
-			auto entry = vm::ptr<u32>::make(vm::alloc(branch_size * (start_funcs.size() + 1), vm::main));
+			auto entry = vm::ptr<u32>::make(vm::alloc(56 + branch_size * (start_funcs.size() + 1), vm::main));
 
-			auto OPD = vm::ptr<u32>::make(vm::alloc(2 * 4));
-			OPD[0] = entry.addr();
-			OPD[1] = 0;
+			const auto OPD = entry;
+
+			// make initial OPD
+			*entry++ = OPD.addr() + 8;
+			*entry++ = 0xdeadbeef;
+
+			// save initialization args
+			*entry++ = MR(r14, r3);
+			*entry++ = MR(r15, r4);
+			*entry++ = MR(r16, r5);
+			*entry++ = MR(r17, r6);
+			*entry++ = MR(r18, r11);
+			*entry++ = MR(r19, r12);
 
 			for (auto &f : start_funcs)
 			{
 				make_branch(entry, f);
 			}
 
-			make_branch(entry, m_ehdr.e_entry);
-			*/
+			// restore initialization args
+			*entry++ = MR(r3, r14);
+			*entry++ = MR(r4, r15);
+			*entry++ = MR(r5, r16);
+			*entry++ = MR(r6, r17);
+			*entry++ = MR(r11, r18);
+			*entry++ = MR(r12, r19);
 
-			ppu_thread main_thread(m_ehdr.e_entry, "main_thread");
+			// branch to initialization
+			make_branch(entry, m_ehdr.e_entry);
+
+			ppu_thread main_thread(OPD.addr(), "main_thread");
 
 			main_thread.args({ Emu.GetPath()/*, "-emu"*/ }).run();
-			main_thread.gpr(11, m_ehdr.e_entry).gpr(12, Emu.GetMallocPageSize());
+			main_thread.gpr(11, OPD.addr()).gpr(12, Emu.GetMallocPageSize());
 
 			return ok;
 		}