SPURS: System service workload

S Gopal Rajagopal 2015-01-02 01:33:36 +05:30
parent 6664116042
commit 4a83d43a8f
3 changed files with 159 additions and 25 deletions

View file

@ -104,7 +104,7 @@ s64 spursInit(
spurs->m.sysSrvMsgUpdateTrace = 0;
for (u32 i = 0; i < 8; i++)
{
spurs->m.xC0[i] = -1;
spurs->m.sysSrvWorkload[i] = -1;
}
// default or system workload:
@ -755,7 +755,7 @@ s64 spursInit(
}
}
spurs->m.traceBuffer = 0;
spurs->m.traceBuffer.set(0);
// can also use cellLibprof if available (omitted)
// some unknown subroutine
@ -3418,13 +3418,13 @@ s64 spursTraceInitialize(vm::ptr<CellSpurs> spurs, vm::ptr<CellSpursTraceInfo> b
buffer->spu_thread_grp = spurs->m.spuTG;
buffer->nspu = spurs->m.nSpus;
spurs->m.traceBuffer = buffer.addr() | (mode & CELL_SPURS_TRACE_MODE_FLAG_WRAP_BUFFER ? 1 : 0);
spurs->m.traceBuffer.set(buffer.addr() | (mode & CELL_SPURS_TRACE_MODE_FLAG_WRAP_BUFFER ? 1 : 0));
spurs->m.traceMode = mode;
u32 spuTraceDataCount = (spurs->m.traceDataSize / CellSpursTracePacket::size) / spurs->m.nSpus;
for (u32 i = 0, j = 8; i < 6; i++)
{
spurs->m.x908[i] = j;
spurs->m.traceStartIndex[i] = j;
j += spuTraceDataCount;
}
@ -3548,7 +3548,7 @@ s64 cellSpursTraceFinalize(vm::ptr<CellSpurs> spurs)
spurs->m.sysSrvTraceControl = 0;
spurs->m.traceMode = 0;
spurs->m.traceBuffer = 0;
spurs->m.traceBuffer.set(0);
spursTraceStatusUpdate(spurs);
return CELL_OK;
}
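A minimal illustrative sketch, not part of this commit, of the two conventions used in the hunks above: spursTraceInitialize stores the trace buffer address and the WRAP_BUFFER flag in a single 64-bit field (flag in bit 0), and traceStartIndex[] gives each SPU an equal slice of the trace data starting at packet index 8. The helper names (packTraceBuffer, fillTraceStartIndex) are illustrative only, not part of the SPURS API.

#include <cstdint>

using u32 = std::uint32_t;
using u64 = std::uint64_t;

// Illustrative only: pack the buffer address and the wrap flag into one field,
// assuming the trace buffer is at least 2-byte aligned so bit 0 is always free.
static u64 packTraceBuffer(u32 addr, bool wrap) {
    return u64{addr} | (wrap ? 1u : 0u);
}

static u32 traceBufferAddr(u64 packed) { return static_cast<u32>(packed & ~u64{1}); }
static bool traceBufferWraps(u64 packed) { return (packed & 1) != 0; }

// Illustrative only: mirrors the loop in spursTraceInitialize - each SPU gets
// an equal slice of the trace data, with the first slice starting at index 8.
static void fillTraceStartIndex(u32 traceDataSize, u32 packetSize, u32 nSpus, u32 startIndex[6]) {
    u32 perSpu = (traceDataSize / packetSize) / nSpus; // assumes nSpus > 0
    for (u32 i = 0, j = 8; i < 6; i++, j += perSpu) {
        startIndex[i] = j;
    }
}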

View file

@ -249,6 +249,8 @@ struct CellSpursWorkloadFlag
typedef void(*CellSpursShutdownCompletionEventHook)(vm::ptr<CellSpurs>, u32 wid, vm::ptr<void> arg);
struct CellSpursTraceInfo;
// Core CellSpurs structures
struct CellSpurs
{
@ -336,9 +338,9 @@ struct CellSpurs
atomic_t<u32> wklMskB; // 0xB4 - System service - Available module id
u8 xB8[5]; // 0xB8 - 0xBC - System service exit barrier
atomic_t<u8> sysSrvMsgUpdateWorkload; // 0xBD
u8 xBE[2]; // 0xBE
u8 xBE; // 0xBE
u8 sysSrvMsgTerminate; // 0xBF
u8 xC0[8]; // 0xC0 - System workload
u8 sysSrvWorkload[8]; // 0xC0
u8 sysSrvOnSpu; // 0xC8
u8 spuPort; // 0xC9 - SPU port for system service
u8 xCA; // 0xCA
@ -351,8 +353,8 @@ struct CellSpurs
u8 wklStatus2[0x10]; // 0xE0
u8 wklEvent2[0x10]; // 0xF0
_sub_str1 wklF1[0x10]; // 0x100
be_t<u64> traceBuffer; // 0x900
be_t<u32> x908[6]; // 0x908 - Indices to traceData (a guess)
vm::bptr<CellSpursTraceInfo, 1, u64> traceBuffer; // 0x900
be_t<u32> traceStartIndex[6]; // 0x908
u8 unknown7[0x948 - 0x920]; // 0x920
be_t<u64> traceDataSize; // 0x948
be_t<u32> traceMode; // 0x950
@ -810,7 +812,7 @@ struct SpursKernelMgmtData {
u8 sysSrvInitialised; // 0x1EA
u8 spuIdling; // 0x1EB
be_t<u16> wklRunnable1; // 0x1EC
be_t<u16> wklRunnable2; // 0x1EE
be_t<u16> wklRunnable2; // 0x1EE
u8 x1F0[0x210 - 0x1F0]; // 0x1F0
be_t<u64> traceBuffer; // 0x210
be_t<u32> traceMsgCount; // 0x218
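An illustrative sketch, not part of this commit, of how the renamed sysSrvWorkload[] field (formerly xC0) is used: one byte per SPU, where 0xFF (the -1 written by spursInit) means "no workload was preempted by the system service on this SPU". The type and member names below (SysSrvWorkloadTable, onPreempt, takePreempted) are hypothetical; only the 0xFF sentinel and the read-then-reset pattern come from the diff.

#include <cstdint>

using u8 = std::uint8_t;
using u32 = std::uint32_t;

// Illustrative only: per-SPU record of the workload preempted by the system
// service workload; 0xFF (the -1 written in spursInit) means "none".
struct SysSrvWorkloadTable {
    u8 sysSrvWorkload[8];

    void init() {
        for (auto& w : sysSrvWorkload) w = 0xFF;
    }

    // Hypothetical setter: remember which workload was preempted on this SPU.
    void onPreempt(u32 spuNum, u8 wid) {
        sysSrvWorkload[spuNum] = wid;
    }

    // Read and clear the entry, as spursSysServiceCleanupAfterPreemption does.
    bool takePreempted(u32 spuNum, u8& wid) {
        wid = sysSrvWorkload[spuNum];
        sysSrvWorkload[spuNum] = 0xFF;
        return wid != 0xFF;
    }
};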

View file

@ -15,14 +15,80 @@ unsigned cellSpursModulePollStatus(CellSpursModulePollStatus * status) {
return 0;
}
void spursSysServiceCleanup(SPUThread & spu, SpursKernelMgmtData * mgmt) {
// TODO: Implement this
/// Restore scheduling parameters to the right values after a workload has been preempted by the system service workload
void spursSysServiceCleanupAfterPreemption(SPUThread & spu, SpursKernelMgmtData * mgmt) {
if (mgmt->spurs->m.sysSrvWorkload[mgmt->spuNum] != 0xFF) {
auto wklId = mgmt->spurs->m.sysSrvWorkload[mgmt->spuNum];
mgmt->spurs->m.sysSrvWorkload[mgmt->spuNum] = 0xFF;
spursSysServiceUpdateWorkload(spu, mgmt);
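// wklCurrentContention packs two workloads per byte: the low nibble counts workload (wid & 0x0F)
// and the high nibble counts workload (wid & 0x0F) + 0x10, hence the 0x01/0x10 decrements below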
if (wklId >= CELL_SPURS_MAX_WORKLOAD) {
mgmt->spurs->m.wklCurrentContention[wklId & 0x0F] -= 0x10;
mgmt->spurs->m.wklReadyCount1[wklId & 0x0F].write_relaxed(mgmt->spurs->m.wklReadyCount1[wklId & 0x0F].read_relaxed() - 1);
} else {
mgmt->spurs->m.wklCurrentContention[wklId & 0x0F] -= 0x01;
mgmt->spurs->m.wklIdleSpuCountOrReadyCount2[wklId & 0x0F].write_relaxed(mgmt->spurs->m.wklIdleSpuCountOrReadyCount2[wklId & 0x0F].read_relaxed() - 1);
}
auto wklIdSaved = mgmt->wklCurrentId;
mgmt->wklCurrentId = wklId;
// Trace - STOP: GUID
CellSpursTracePacket pkt;
memset(&pkt, 0, sizeof(pkt));
pkt.header.tag = CELL_SPURS_TRACE_TAG_STOP;
pkt.data.stop = 0; // TODO: Put GUID of the sys service workload here
cellSpursModulePutTrace(&pkt, mgmt->dmaTagId);
mgmt->wklCurrentId = wklIdSaved;
}
}
/// Update the trace count for this SPU in CellSpurs
void spursSysServiceUpdateTraceCount(SPUThread & spu, SpursKernelMgmtData * mgmt) {
if (mgmt->traceBuffer) {
mgmt->spurs->m.traceBuffer->count[mgmt->spuNum] = mgmt->traceMsgCount;
}
}
/// Update trace control in SPU from CellSpurs
void spursSysServiceUpdateTrace(SPUThread & spu, SpursKernelMgmtData * mgmt, u32 arg2, u32 arg3, u32 arg4) {
// TODO: Implement this
auto sysSrvMsgUpdateTrace = mgmt->spurs->m.sysSrvMsgUpdateTrace;
mgmt->spurs->m.sysSrvMsgUpdateTrace &= ~(1 << mgmt->spuNum);
mgmt->spurs->m.xCC &= ~(1 << mgmt->spuNum);
mgmt->spurs->m.xCC |= arg2 << mgmt->spuNum;
bool notify = false;
if (((sysSrvMsgUpdateTrace & (1 << mgmt->spuNum)) != 0) && (mgmt->spurs->m.sysSrvMsgUpdateTrace == 0) && (mgmt->spurs->m.xCD != 0)) {
mgmt->spurs->m.xCD = 0;
notify = true;
}
if (arg4 && mgmt->spurs->m.xCD != 0) {
mgmt->spurs->m.xCD = 0;
notify = true;
}
if (((sysSrvMsgUpdateTrace & (1 << mgmt->spuNum)) != 0) || (arg3 != 0)) {
if (mgmt->traceMsgCount != 0xFF || mgmt->traceBuffer == 0 || mgmt->spurs->m.traceBuffer.addr() == 0) {
spursSysServiceUpdateTraceCount(spu, mgmt);
} else {
mgmt->traceMsgCount = mgmt->spurs->m.traceBuffer->count[mgmt->spuNum];
}
mgmt->traceBuffer = mgmt->spurs->m.traceBuffer.addr() + (mgmt->spurs->m.traceStartIndex[mgmt->spuNum] << 4);
mgmt->traceMaxCount = mgmt->spurs->m.traceStartIndex[1] - mgmt->spurs->m.traceStartIndex[0];
if (mgmt->traceBuffer == 0) {
mgmt->traceMsgCount = 0;
}
}
if (notify) {
// TODO: sys_spu_thread_send_event(mgmt->spurs->m.spuPort, 2, 0);
}
}
/// Update events in CellSpurs
void spursSysServiceUpdateEvent(SPUThread & spu, SpursKernelMgmtData * mgmt, u32 wklShutdownMask) {
u32 wklNotifyMask = 0;
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
@ -48,7 +114,7 @@ void spursSysServiceUpdateEvent(SPUThread & spu, SpursKernelMgmtData * mgmt, u32
}
}
/// Update workload information in the LS from main memory
/// Update workload information in the SPU LS from CellSpurs
void spursSysServiceUpdateWorkload(SPUThread & spu, SpursKernelMgmtData * mgmt) {
u32 wklShutdownMask = 0;
mgmt->wklRunnable1 = 0;
@ -65,7 +131,7 @@ void spursSysServiceUpdateWorkload(SPUThread & spu, SpursKernelMgmtData * mgmt)
}
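// A shutting-down workload becomes removable only when this SPU had been running it and wklStatus1 now shows no SPU still running it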
if (mgmt->spurs->m.wklState1[i].read_relaxed() == SPURS_WKL_STATE_SHUTTING_DOWN) {
if (((wklStatus & (1 << mgmt->spuNum)) != 0) && ((mgmt->spurs->m.wklStatus1[i] & (1 << mgmt->spuNum)) == 0)) {
if (((wklStatus & (1 << mgmt->spuNum)) != 0) && (mgmt->spurs->m.wklStatus1[i] == 0)) {
mgmt->spurs->m.wklState1[i].write_relaxed(SPURS_WKL_STATE_REMOVABLE);
wklShutdownMask |= 0x80000000 >> i;
}
@ -88,7 +154,7 @@ void spursSysServiceUpdateWorkload(SPUThread & spu, SpursKernelMgmtData * mgmt)
}
if (mgmt->spurs->m.wklState2[i].read_relaxed() == SPURS_WKL_STATE_SHUTTING_DOWN) {
if (((wklStatus & (1 << mgmt->spuNum)) != 0) && ((mgmt->spurs->m.wklStatus2[i] & (1 << mgmt->spuNum)) == 0)) {
if (((wklStatus & (1 << mgmt->spuNum)) != 0) && (mgmt->spurs->m.wklStatus2[i] == 0)) {
mgmt->spurs->m.wklState2[i].write_relaxed(SPURS_WKL_STATE_REMOVABLE);
wklShutdownMask |= 0x8000 >> i;
}
@ -103,23 +169,82 @@ void spursSysServiceUpdateWorkload(SPUThread & spu, SpursKernelMgmtData * mgmt)
/// Process any messages
void spursSysServiceProcessMessages(SPUThread & spu, SpursKernelMgmtData * mgmt) {
// Process update workload message
if (mgmt->spurs->m.sysSrvMsgUpdateWorkload.read_relaxed() & (1 << mgmt->spuNum)) {
mgmt->spurs->m.sysSrvMsgUpdateWorkload &= ~(1 << mgmt->spuNum);
spursSysServiceUpdateWorkload(spu, mgmt);
}
// Process update trace message
if (mgmt->spurs->m.sysSrvMsgUpdateTrace & (1 << mgmt->spuNum)) {
spursSysServiceUpdateTrace(spu, mgmt, 1, 0, 0);
}
// Process terminate request
if (mgmt->spurs->m.sysSrvMsgTerminate & (1 << mgmt->spuNum)) {
mgmt->spurs->m.sysSrvOnSpu &= ~(1 << mgmt->spuNum);
// TODO: Rest of the terminate processing
}
}
void spursSysServiceExitIfRequired() {
// TODO: Implement this
/// Wait for an external event or exit the SPURS thread group if no workloads can be scheduled
void spursSysServiceWaitOrExit(SPUThread & spu, SpursKernelMgmtData * mgmt) {
while (true) {
u32 nIdlingSpus = 0;
for (u32 i = 0; i < 8; i++) {
if (mgmt->spurs->m.spuIdling & (1 << i)) {
nIdlingSpus++;
}
}
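// The SPURS instance may exit only when every SPU in the group is idling and it was created with the SF1_EXIT_IF_NO_WORK flag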
bool shouldExit = (nIdlingSpus == mgmt->spurs->m.nSpus) && (mgmt->spurs->m.flags1 & SF1_EXIT_IF_NO_WORK);
bool foundSchedulableWorkload = false;
if (mgmt->spurs->m.sysSrvMessage.read_relaxed() & (1 << mgmt->spuNum)) {
foundSchedulableWorkload = true;
} else {
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
if ((mgmt->wklRunnable1 & (0x8000 >> i)) &&
(mgmt->priority[i] & 0x0F) != 0 &&
(mgmt->spurs->m.wklMaxContention[i].read_relaxed() & 0x0F) > (mgmt->spurs->m.wklCurrentContention[i] & 0x0F)) {
foundSchedulableWorkload = true;
break;
}
}
if (mgmt->spurs->m.flags1 & SF1_32_WORKLOADS && foundSchedulableWorkload == false) {
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
if ((mgmt->wklRunnable2 & (0x8000 >> i)) &&
(mgmt->priority[i] & 0xF0) != 0 &&
(mgmt->spurs->m.wklMaxContention[i].read_relaxed() & 0xF0) > (mgmt->spurs->m.wklCurrentContention[i] & 0xF0)) {
foundSchedulableWorkload = true;
break;
}
}
}
}
if ((mgmt->spurs->m.spuIdling & (1 << mgmt->spuNum)) && shouldExit == false && foundSchedulableWorkload == false) {
// TODO: Wait for events
}
if (shouldExit || foundSchedulableWorkload == false) {
mgmt->spurs->m.spuIdling |= 1 << mgmt->spuNum;
} else {
mgmt->spurs->m.spuIdling &= ~(1 << mgmt->spuNum);
}
if (shouldExit == false && foundSchedulableWorkload == false) {
continue;
}
if (shouldExit == false) {
return;
}
break;
}
// TODO: exit spu thread group
}
/// Main function for the system service workload
@ -127,15 +252,15 @@ void spursSysServiceWorkloadMain(SPUThread & spu, u32 pollStatus) {
auto mgmt = vm::get_ptr<SpursKernelMgmtData>(spu.ls_offset);
if (mgmt->spurs.addr() % CellSpurs::align) {
// TODO: Halt
assert(0);
}
// Initialise the system service if this is the first time it is being started on this SPU
if (mgmt->sysSrvInitialised != 0) {
if (mgmt->sysSrvInitialised == 0) {
mgmt->sysSrvInitialised = 1;
if (mgmt->spurs->m.sysSrvOnSpu & (1 << mgmt->spuNum)) {
// TODO: Halt
assert(0);
}
mgmt->spurs->m.sysSrvOnSpu |= 1 << mgmt->spuNum;
@ -143,7 +268,7 @@ void spursSysServiceWorkloadMain(SPUThread & spu, u32 pollStatus) {
mgmt->traceMsgCount = -1;
spursSysServiceUpdateTrace(spu, mgmt, 1, 1, 0);
spursSysServiceCleanup(spu, mgmt);
spursSysServiceCleanupAfterPreemption(spu, mgmt);
// Trace - SERVICE: INIT
CellSpursTracePacket pkt;
@ -163,8 +288,10 @@ void spursSysServiceWorkloadMain(SPUThread & spu, u32 pollStatus) {
cellSpursModulePutTrace(&pkt, mgmt->dmaTagId);
while (true) {
// Process messages for the system service workload
spursSysServiceProcessMessages(spu, mgmt);
poll:
if (cellSpursModulePollStatus(nullptr)) {
// Trace - SERVICE: EXIT
CellSpursTracePacket pkt;
@ -181,10 +308,16 @@ void spursSysServiceWorkloadMain(SPUThread & spu, u32 pollStatus) {
break;
}
// If we reach here it means that either there are more system service messages to be processed
// or there are no workloads that can be scheduled.
// If the SPU is not idling then process the remaining system service messages
if (mgmt->spuIdling == 0) {
continue;
}
// If we reach here it means that the SPU is idling
// Trace - SERVICE: WAIT
CellSpursTracePacket pkt;
memset(&pkt, 0, sizeof(pkt));
@ -192,7 +325,8 @@ void spursSysServiceWorkloadMain(SPUThread & spu, u32 pollStatus) {
pkt.data.service.incident = CELL_SPURS_TRACE_SERVICE_WAIT;
cellSpursModulePutTrace(&pkt, mgmt->dmaTagId);
spursSysServiceExitIfRequired();
spursSysServiceWaitOrExit(spu, mgmt);
goto poll;
}
}
@ -206,8 +340,6 @@ void spursSysServiceWorkloadEntry(SPUThread & spu) {
*(vm::ptr<u32>::make(spu.GPR[1]._u32[3])) = 0x3FFF0;
memset(vm::get_ptr<void>(spu.ls_offset + 0x3FFE0), 0, 32);
LV2_LOCK(0);
if (mgmt->wklCurrentId == CELL_SPURS_SYS_SERVICE_WORKLOAD_ID) {
spursSysServiceWorkloadMain(spu, pollStatus);
} else {